/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void	__tcp_v6_send_check(struct sk_buff *skb,
				    const struct in6_addr *saddr,
				    const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
static __inline__ __sum16 tcp_v6_check(int len,
				       const struct in6_addr *saddr,
				       const struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
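
/*
 * tcp_v6_connect() below drives the active-open path: validate the
 * destination, latch any flow label, fall back to tcp_v4_connect() for
 * v4-mapped destinations, resolve a route and source address, pick an
 * initial sequence number, and finally emit the SYN via tcp_connect().
 */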
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &np->daddr);
	ipv6_addr_copy(&fl6.saddr,
		       (saddr ? saddr : &np->saddr));
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
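
/*
 * tcp_v6_err() is the ICMPv6 error callback: it demultiplexes the quoted
 * TCP header back to a socket, handles PKT_TOOBIG by re-resolving the
 * route and shrinking the MSS, drops pending request_socks whose SYN|ACK
 * the error refers to, and otherwise reports the error to the socket.
 */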
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl6.daddr, &np->daddr);
			ipv6_addr_copy(&fl6.saddr, &np->saddr);
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
	ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
	fl6.flowlabel = 0;
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl6, opt);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}
static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}
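
/*
 * The CONFIG_TCP_MD5SIG block below implements the RFC 2385 TCP MD5
 * signature option for IPv6: per-peer keys live in a dense array hanging
 * off tp->md5sig_info and are consulted on both transmit and receive.
 */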
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}
static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}
static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
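
/*
 * Note on the deletion above: because the keys live in a dense array,
 * removing entry i just shifts the tail down with one memmove(); the
 * allocation itself is only released once the table becomes empty.
 */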
static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
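
/*
 * The two helpers below feed the MD5 transform in the order RFC 2385
 * prescribes: pseudo-header first, then the TCP header (with a zeroed
 * checksum field), then for full segments the payload, and the key last.
 */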
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif
static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}
static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}
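
/*
 * tcp6_gro_receive() below only admits a segment into the GRO engine once
 * its checksum is settled: CHECKSUM_COMPLETE values are verified against
 * the pseudo-header here, anything unverified is flushed to the slow path.
 */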
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for a RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
			req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}
static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
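
/*
 * tcp_v6_hnd_req() above resolves an incoming segment against a listener:
 * first the SYN queue, then the established hash (to catch a TIME_WAIT
 * recycle), and finally the syncookie check for ACKs carrying no SYN.
 */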
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (!isn) {
		struct inet_peer *peer = NULL;

		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
				    &treq->rmt_addr)) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations proven
			 * to be alive.
			 * It means that we would continue to communicate
			 * with destinations already remembered at the
			 * moment of the synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}
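
/*
 * tcp_v6_syn_recv_sock() materialises the child socket once the 3WHS
 * completes: the v4-mapped case clones via tcp_v4_syn_recv_sock() and is
 * repointed at the mapped ops, while the native case copies addresses,
 * options and (if configured) the peer's MD5 key onto the new socket.
 */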
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
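
/*
 * tcp_v6_checksum_init() above returns non-zero only when the segment is
 * short enough (<= 76 bytes) to be worth verifying immediately and that
 * full check fails; longer packets keep a partial csum for later folding.
 */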
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb->rxhash);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb->rxhash);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb->rxhash);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
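
/*
 * tcp_v6_rcv() is the protocol entry point: it validates the TCP header
 * and checksum, demultiplexes to a socket, and then either processes the
 * segment directly, prequeues it, or backlogs it when the socket is owned
 * by the user; TIME_WAIT sockets get their own mini state machine below.
 */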
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
{
	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
		peer = inet_getpeer_v6(&np->daddr, 1);
		*release_it = true;
	} else {
		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);
		peer = rt->rt6i_peer;
		*release_it = false;
	}

	return peer;
}
static void *tcp_v6_tw_get_peer(struct sock *sk)
{
	struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
	struct inet_timewait_sock *tw = inet_twsk(sk);

	if (tw->tw_family == AF_INET)
		return tcp_v4_tw_get_peer(sk);

	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
}
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v6_tw_get_peer,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v6_get_peer,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0,0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
	goto out_tcpv6_protocol;
}
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}