/*
 *	Linux INET6 implementation
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void	__tcp_v6_send_check(struct sk_buff *skb,
				    const struct in6_addr *saddr,
				    const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
static __inline__ __sum16 tcp_v6_check(int len,
				       const struct in6_addr *saddr,
				       const struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
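
/*
 * Added commentary (not in the original file): csum_ipv6_magic() folds the
 * RFC 2460 pseudo-header -- both 128-bit addresses, the upper-layer length
 * and the next-header value (6 for TCP) -- into the running checksum before
 * the TCP segment itself is covered.  A minimal userspace sketch of the same
 * folding; all names below are illustrative, not kernel API:
 */
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>

/* Fold a 32-bit accumulator down to 16 bits (one's-complement carry). */
static uint32_t fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* Partial sum over the IPv6 pseudo-header only; the caller would keep
 * accumulating over the TCP header and payload, then complement the
 * final folded value. */
static uint16_t pseudo_hdr_sum(const uint8_t saddr[16], const uint8_t daddr[16],
			       uint32_t tcp_len)
{
	uint32_t sum = 0;
	int i;

	for (i = 0; i < 16; i += 2) {
		sum += (saddr[i] << 8) | saddr[i + 1];
		sum += (daddr[i] << 8) | daddr[i + 1];
	}
	sum += tcp_len >> 16;	/* 32-bit length as two 16-bit words */
	sum += tcp_len & 0xffff;
	sum += 6;		/* IPPROTO_TCP in the next-header word */
	return (uint16_t)fold16(sum);
}
#endif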
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &np->daddr);
	ipv6_addr_copy(&fl6.saddr,
		       (saddr ? saddr : &np->saddr));
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
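
/*
 * Added usage note (not in the original file): the IPV6_ADDR_MAPPED branch
 * above is what lets a single AF_INET6 socket talk to IPv4 peers.  A minimal
 * userspace sketch that exercises it, connecting an AF_INET6 socket to the
 * v4-mapped address ::ffff:192.0.2.1 (address and port are made up):
 */
#if 0	/* illustrative sketch, not compiled */
#include <arpa/inet.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_mapped(void)
{
	struct sockaddr_in6 sin6;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	memset(&sin6, 0, sizeof(sin6));
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(80);
	/* ::ffff:192.0.2.1 -- the kernel flips icsk_af_ops to ipv6_mapped
	 * and hands the connect off to tcp_v4_connect() */
	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sin6.sin6_addr);

	if (connect(fd, (struct sockaddr *)&sin6, sizeof(sin6)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif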
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl6.daddr, &np->daddr);
			ipv6_addr_copy(&fl6.saddr, &np->saddr);
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}
		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, it SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
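
/*
 * Added commentary (not in the original file): tests such as
 * "(1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)" above work because each
 * TCPF_* flag is defined as 1 << TCP_*, so shifting by the state value turns
 * a set-membership test into a single AND.  A self-contained sketch of the
 * idiom, with made-up names:
 */
#if 0	/* illustrative sketch, not compiled */
enum example_state { ST_A, ST_B, ST_C };
#define STF_A (1 << ST_A)
#define STF_C (1 << ST_C)

/* Non-zero exactly when st is ST_A or ST_C. */
static int state_is_a_or_c(enum example_state st)
{
	return (1 << st) & (STF_A | STF_C);
}
#endif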
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
	ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl6, opt);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tp->md5sig_info->entries6 == 0 &&
		    tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				if (tp->md5sig_info->entries6 == 0)
					tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}
static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
				tcp_free_md5sig_pool();
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			return 0;
		}
	}
	return -ENOENT;
}

static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
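
/*
 * Added usage note (not in the original file): tcp_v6_parse_md5_keys() above
 * is reached through setsockopt(TCP_MD5SIG).  A minimal userspace sketch of
 * installing an RFC 2385 key for one peer (the peer address and key text are
 * made up):
 */
#if 0	/* illustrative sketch, not compiled */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int install_md5_key(int fd, const struct sockaddr_in6 *peer)
{
	struct tcp_md5sig md5;

	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	md5.tcpm_keylen = 6;
	memcpy(md5.tcpm_key, "secret", 6);

	/* tcpm_keylen == 0 would delete the key for this peer instead */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif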
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif
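
/*
 * Added commentary (not in the original file): per RFC 2385 the digest that
 * tcp_v6_md5_hash_skb() computes covers, in order: the IPv6 pseudo-header
 * (both addresses, the length being digested, IPPROTO_TCP), the fixed TCP
 * header with its checksum field zeroed and options excluded, the payload,
 * and finally the configured key.  That is exactly the sequence of
 * tcp_v6_md5_hash_pseudoheader() / tcp_md5_hash_header() /
 * tcp_md5_hash_skb_data() / tcp_md5_hash_key() calls above;
 * tcp_v6_md5_hash_hdr() is the header-only variant used when generating
 * RST/ACK replies that carry no payload.
 */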
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif
static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}

static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
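
/*
 * Added commentary (not in the original file): the tot_len arithmetic above
 * yields a 20-byte header, plus 12 when a timestamp echo is sent
 * (TCPOLEN_TSTAMP_ALIGNED: NOP, NOP, kind 8, len 10, two 32-bit stamps) and
 * plus 20 for the MD5 option (TCPOLEN_MD5SIG_ALIGNED: NOP, NOP, kind 19,
 * len 18, 16 digest bytes), so t1->doff = tot_len / 4 always stays
 * word-aligned.  A sketch of the same 32-bit option-word packing in plain C,
 * with illustrative stand-in names:
 */
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>

#define OPT_NOP		 1
#define OPT_TIMESTAMP	 8
#define OPTLEN_TIMESTAMP 10

/* First word of the timestamp option: bytes 01 01 08 0a (NOP NOP kind len),
 * followed on the wire by the two 32-bit timestamp values. */
static uint32_t tstamp_opt_word(void)
{
	return (OPT_NOP << 24) | (OPT_NOP << 16) |
	       (OPT_TIMESTAMP << 8) | OPTLEN_TIMESTAMP;
}
#endif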
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}
static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	int want_cookie = 0;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = 0;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (!isn) {
		struct inet_peer *peer = NULL;

		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
				    &treq->rmt_addr)) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}
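
/*
 * Added commentary (not in the original file): three admission checks gate a
 * SYN above.  A full SYN (request) queue switches to syncookies where they
 * are enabled; a full accept queue with more than one young (not yet
 * retransmitted) request pending drops outright; and without syncookies the
 * quarter-full rule "max_syn_backlog - queue_len < max_syn_backlog >> 2"
 * (i.e. the queue is over 75% full) reserves the last quarter of the backlog
 * for destinations that have proven themselves alive via a remembered
 * timestamp.
 */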
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
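
/*
 * Added commentary (not in the original file): when the device reported
 * CHECKSUM_COMPLETE, folding the pseudo-header into skb->csum must come out
 * to zero for a valid segment.  Otherwise the pseudo-header partial sum is
 * seeded into skb->csum and verification is deferred: short segments
 * (<= 76 bytes) are cheap enough to checksum immediately here, while larger
 * ones are left for the checksum-and-copy to user space later on the
 * receive path.
 */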
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
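
/*
 * Added usage note (not in the original file): the latching above backs the
 * Stevens-style getsockopt(IPV6_PKTOPTIONS), which hands the options seen on
 * the most recent segment to user space as a block of cmsgs.  A hedged
 * userspace sketch, reading the hop limit of the last received segment:
 */
#if 0	/* illustrative sketch, not compiled */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int last_hop_limit(int fd)
{
	unsigned char buf[256];
	socklen_t len = sizeof(buf);
	struct msghdr msg;
	struct cmsghdr *cmsg;
	int on = 1;

	/* ask TCP to latch per-packet info first */
	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, sizeof(on));

	/* optval acts as the cmsg buffer; len returns the bytes used */
	if (getsockopt(fd, IPPROTO_IPV6, IPV6_PKTOPTIONS, buf, &len) < 0)
		return -1;

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = buf;
	msg.msg_controllen = len;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == IPPROTO_IPV6 &&
		    cmsg->cmsg_type == IPV6_HOPLIMIT)
			return *(int *)CMSG_DATA(cmsg);
	return -1;
}
#endif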
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	/*
	 *	Discard frame
	 */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
{
	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
		peer = inet_getpeer_v6(&np->daddr, 1);
		*release_it = true;
	} else {
		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);
		peer = rt->rt6i_peer;
		*release_it = false;
	}

	return peer;
}

static void *tcp_v6_tw_get_peer(struct sock *sk)
{
	const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
	const struct inet_timewait_sock *tw = inet_twsk(sk);

	if (tw->tw_family == AF_INET)
		return tcp_v4_tw_get_peer(sk);

	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v6_tw_get_peer,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v6_get_peer,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
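
/*
 * Added commentary (not in the original file): these two op tables are the
 * heart of dual-stack TCP.  ipv6_specific carries real IPv6 traffic, while
 * ipv6_mapped routes an AF_INET6 socket's traffic through the IPv4 engine
 * (ip_queue_xmit, tcp_v4_send_check, a 20-byte struct iphdr).  Whether a
 * socket may fall back to mapped mode at all is governed by the IPV6_V6ONLY
 * knob; a userspace sketch:
 */
#if 0	/* illustrative sketch, not compiled */
#include <netinet/in.h>
#include <sys/socket.h>

/* Refuse v4-mapped peers: a later connect() to ::ffff:a.b.c.d fails with
 * ENETUNREACH (see the __ipv6_only_sock() check in tcp_v6_connect() above),
 * and the bound port no longer conflicts with AF_INET sockets. */
static int make_v6only(int fd)
{
	int on = 1;
	return setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
}
#endif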
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0,0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
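
/*
 * Added usage note (not in the original file): each /proc/net/tcp6 endpoint
 * is printed above as four %08X words plus a hex port, so user space must
 * parse 32 hex digits per address.  Note that each 32-bit word comes out in
 * host byte order, so on little-endian machines the bytes within a word are
 * reversed relative to the wire.  A sketch for one endpoint field:
 */
#if 0	/* illustrative sketch, not compiled */
#include <stdio.h>

/* Parse e.g. "00000000000000000000000001000000:0016" (::1, port 22). */
static int parse_tcp6_endpoint(const char *field, unsigned int addr_words[4],
			       unsigned int *port)
{
	if (sscanf(field, "%8x%8x%8x%8x:%x", &addr_words[0], &addr_words[1],
		   &addr_words[2], &addr_words[3], port) != 5)
		return -1;
	return 0;
}
#endif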
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};

static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}