/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
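
/*
 * The IPV6_V6ONLY behaviour mentioned in the changelog above is a
 * userspace-visible socket option. A minimal userspace sketch (the file
 * descriptor and value are illustrative): clearing the flag lets a single
 * IPv6 listener also accept IPv4 peers, which appear as v4-mapped
 * addresses; the dual-bind support below is the kernel side of this.
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	int off = 0;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off));
 */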
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	const struct rt6_info *rt = (const struct rt6_info *)dst;

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	if (rt->rt6i_node)
		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
}
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
		ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
				       &sk->sk_v6_rcv_saddr);

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
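
/*
 * For the IPV6_ADDR_MAPPED branch above, a minimal userspace sketch of
 * what triggers it (address and port are illustrative). Connecting an
 * AF_INET6 socket to a v4-mapped address such as ::ffff:192.0.2.1 makes
 * tcp_v6_connect() hand the socket over to tcp_v4_connect() and swap in
 * the ipv6_mapped ops, so the connection runs as plain IPv4 underneath:
 *
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
 *				  .sin6_port   = htons(80) };
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a.sin6_addr);
 *	connect(fd, (struct sockaddr *)&a, sizeof(a));
 */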
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi6 *fl6,
			      struct request_sock *req,
			      u16 queue_mapping)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, NULL);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && (ireq->pktopts != NULL))
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}
static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
{
	struct flowi6 fl6;
	int res;

	res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0);
	if (!res)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return res;
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
}
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
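
/*
 * tcp_v6_parse_md5_keys() above is reached through setsockopt(TCP_MD5SIG).
 * A minimal userspace sketch (key and peer address are illustrative); a
 * v4-mapped peer address stores an AF_INET key, anything else AF_INET6:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *peer = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	peer->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &peer->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */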
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &sk->sk_v6_daddr;
	} else if (req) {
		saddr = &inet_rsk(req)->ir_v6_loc_addr;
		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
#endif
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 tsval, u32 tsecr,
				 struct tcp_md5sig_key *key, int rst, u8 tclass,
				 u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow even when it is for a RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
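
/*
 * A worked example of the option-word packing above: each __be32 carries
 * four option bytes assembled in host order before htonl(). For the
 * timestamp case,
 *
 *	htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 *	      (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
 *
 * emits the byte sequence 01 01 08 0a on the wire (NOP, NOP, kind 8,
 * length 10), followed by the two 32-bit timestamp values.
 */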
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, key, 0, tclass,
			     label);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, (tw->tw_flowlabel << 12));

	inet_twsk_put(tw);
}
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
			req->rcv_wnd, tcp_time_stamp, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	bool want_cookie = false;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if ((sysctl_tcp_syncookies == 2 ||
	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
		goto drop;
	}

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, 0, NULL);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb, sock_net(sk));

	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim ||
		    np->repflow) {
			atomic_inc(&skb->users);
			ireq->pktopts = skb;
		}

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_release;

	if (tcp_v6_send_synack(sk, dst, &fl6, req,
			       skb_get_queue_mapping(skb)) ||
	    want_cookie)
		goto drop_and_free;

	tcp_rsk(req)->snt_synack = tcp_time_stamp;
	tcp_rsk(req)->listener = NULL;
	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76)
		return __skb_checksum_complete(skb);
	return 0;
}
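
/*
 * A note on the CHECKSUM_COMPLETE path above: the device has already
 * summed the whole segment, so folding the pseudo-header into that sum
 * must yield zero for a valid packet; tcp_v6_check() performs exactly
 * that fold. The invariant, in ones-complement arithmetic:
 *
 *	csum_fold(csum(pseudo-header) + csum(TCP header + payload)) == 0
 */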
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1677 .md5_parse
= tcp_v6_parse_md5_keys
,
1682 * TCP over IPv4 via INET6 API
1685 static const struct inet_connection_sock_af_ops ipv6_mapped
= {
1686 .queue_xmit
= ip_queue_xmit
,
1687 .send_check
= tcp_v4_send_check
,
1688 .rebuild_header
= inet_sk_rebuild_header
,
1689 .sk_rx_dst_set
= inet_sk_rx_dst_set
,
1690 .conn_request
= tcp_v6_conn_request
,
1691 .syn_recv_sock
= tcp_v6_syn_recv_sock
,
1692 .net_header_len
= sizeof(struct iphdr
),
1693 .setsockopt
= ipv6_setsockopt
,
1694 .getsockopt
= ipv6_getsockopt
,
1695 .addr2sockaddr
= inet6_csk_addr2sockaddr
,
1696 .sockaddr_len
= sizeof(struct sockaddr_in6
),
1697 .bind_conflict
= inet6_csk_bind_conflict
,
1698 #ifdef CONFIG_COMPAT
1699 .compat_setsockopt
= compat_ipv6_setsockopt
,
1700 .compat_getsockopt
= compat_ipv6_getsockopt
,
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
		else
			get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	}
out:
	return 0;
}
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
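
/*
 * The partial clear above is a general trick: zero everything in a
 * structure except one live member by splitting the clear around it.
 * A minimal standalone sketch (struct and field names are illustrative,
 * not from this file):
 *
 *	struct obj { int a; void *keep; int b; };
 *
 *	static void clear_but_keep(struct obj *o)
 *	{
 *		memset(o, 0, offsetof(struct obj, keep));
 *		memset(&o->keep + 1, 0,
 *		       sizeof(*o) - offsetof(struct obj, keep) -
 *		       sizeof(o->keep));
 *	}
 */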
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v6_mtu_reduced,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}