/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void	__tcp_v6_send_check(struct sk_buff *skb,
				    const struct in6_addr *saddr,
				    const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
static __inline__ __sum16 tcp_v6_check(int len,
				       const struct in6_addr *saddr,
				       const struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
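
/* Establish an active TCP/IPv6 connection.  Note that for a v4-mapped
 * destination this switches the socket's af_ops to ipv6_mapped and
 * delegates the rest of the work to tcp_v4_connect().
 */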
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
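
/* ICMPv6 error handler: called when an ICMPv6 message refers to one of
 * our TCP segments.  PKT_TOOBIG triggers a path-MTU update and simple
 * retransmit; other errors are converted with icmpv6_err_convert() and
 * reported to the socket, with care taken for embryonic request_socks
 * and sockets currently locked by a user process.
 */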
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			  th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			fl6.daddr = np->daddr;
			fl6.saddr = np->saddr;
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}
		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
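
/* Build and transmit a SYN+ACK for the given request_sock: route the
 * flow back to the peer, let tcp_make_synack() construct the segment,
 * fill in the TCP checksum and send it via ip6_xmit().
 */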
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = treq->rmt_addr;
	fl6.saddr = treq->loc_addr;
	fl6.flowlabel = 0;
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		fl6.daddr = treq->rmt_addr;
		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}
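
/* TCP-MD5 (RFC 2385) signature support: per-destination keys are kept
 * in tp->md5sig_info and looked up by the peer's IPv6 address.
 */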
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tp->md5sig_info->entries6 == 0 &&
		    tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				kfree(newkey);
				if (tp->md5sig_info->entries6 == 0)
					tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr = *peer;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}
static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}
static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
				tcp_free_md5sig_pool();
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			return 0;
		}
	}
	return -ENOENT;
}
static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
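
/* The MD5 digest is computed over a pseudo-header (as for the TCP
 * checksum), then the TCP header with a zeroed checksum field, then
 * the payload, and finally the key itself; the helpers below feed
 * each piece to the crypto hash in turn.
 */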
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif
static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}
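
/* Segmentation/receive offload glue: prime the partial checksum for
 * GSO transmit, and validate or flush aggregation candidates for GRO.
 */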
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}

static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}
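
/* Send a stateless control segment (RST or pure ACK) in response to
 * @skb, using the per-namespace control socket; addresses and ports
 * are simply reflected from the incoming segment.
 */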
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;

	buff->ip_summed = CHECKSUM_PARTIAL;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup whether it is for RST or ACK.
	 * Underlying function will use this to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key, u8 tclass)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}
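
/* For a packet aimed at a listening socket, find the matching
 * request_sock or an already established/timewait child, falling back
 * to SYN-cookie validation for bare ACKs.
 */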
static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	int want_cookie = 0;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = 0;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	treq->rmt_addr = ipv6_hdr(skb)->saddr;
	treq->loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (!isn) {
		struct inet_peer *peer = NULL;

		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
				    &treq->rmt_addr)) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}
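
/* Create the child socket for a completed handshake.  The v4-mapped
 * case is delegated to tcp_v4_syn_recv_sock(); otherwise the new sock
 * inherits its IPv6 options from the listener and the request_sock.
 */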
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newnp->rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newnp->daddr = treq->rmt_addr;
	newnp->saddr = treq->loc_addr;
	newnp->rcv_saddr = treq->loc_addr;
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
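
/* Main receive entry point for TCP/IPv6: validate the header and
 * checksum, look up the owning socket, and either process the segment
 * directly, prequeue it, or push it onto the socket backlog.
 */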
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
{
	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
		peer = inet_getpeer_v6(&np->daddr, 1);
		*release_it = true;
	} else {
		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);
		peer = rt->rt6i_peer;
		*release_it = false;
	}

	return peer;
}

static void *tcp_v6_tw_get_peer(struct sock *sk)
{
	const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
	const struct inet_timewait_sock *tw = inet_twsk(sk);

	if (tw->tw_family == AF_INET)
		return tcp_v4_tw_get_peer(sk);

	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
}
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v6_tw_get_peer,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v6_get_peer,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	sock_update_memcg(sk);
	sk_sockets_allocated_inc(sk);
	local_bh_enable();

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0,0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};

static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
	goto out_tcpv6_protocol;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}