/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void __tcp_v6_send_check(struct sk_buff *skb,
				struct in6_addr *saddr,
				struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	return NULL;
}
#endif
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
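
/*
 * tcp_v6_check() below wraps csum_ipv6_magic(), which implements the
 * RFC 2460 pseudo-header sum: both 128-bit addresses, the segment
 * length and the next-header value (IPPROTO_TCP) are added into the
 * running sum "base", which is then folded to 16 bits and complemented.
 * For illustration only (not kernel code), the final fold step is:
 *
 *	sum = (sum & 0xffff) + (sum >> 16);	// fold carries once
 *	sum = (sum & 0xffff) + (sum >> 16);	// and any new carry
 *	check = (u16)~sum;			// one's complement
 */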
static __inline__ __sum16 tcp_v6_check(int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
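
/*
 * Initial sequence numbers are not a plain counter: they are derived by
 * secure_tcpv6_sequence_number() from a keyed hash over the connection
 * 4-tuple (both addresses, both ports) plus a clock component, which
 * makes off-path ISN prediction hard.
 */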
static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
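
/*
 * Connect an AF_INET6 socket. Note the dual-stack branch below: a
 * v4-mapped destination (::ffff:a.b.c.d) is handed off to
 * tcp_v4_connect() unless the socket is v6-only. Userspace sketch of
 * both outcomes (illustration only; address and port are placeholders):
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
 *				   .sin6_port   = htons(80) };
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sa.sin6_addr);
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa)); - IPv4 path
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa)); - ENETUNREACH
 */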
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl6.flowlabel;
	/*
	 *	TCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}
	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &np->daddr);
	ipv6_addr_copy(&fl6.saddr,
		       (saddr ? saddr : &np->saddr));
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
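
/*
 * ICMPv6 error handler. Finds the socket the offending segment belongs
 * to and reacts per error type; notably ICMPV6_PKT_TOOBIG drives path
 * MTU discovery: the cached route is revalidated, the MSS is re-synced
 * to the new path MTU with tcp_sync_mss(), and outstanding data is
 * resent via tcp_simple_retransmit().
 */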
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	struct tcp_sock *tp;
	__u32 seq;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			  th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl6.daddr, &np->daddr);
			ipv6_addr_copy(&fl6.saddr, &np->saddr);
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}
		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */
			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
	ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl6, opt);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}
static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}
static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}
static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}
static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}
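
/*
 * RFC 2385 keys enter through the TCP_MD5SIG socket option, parsed
 * below. Userspace usage sketch (illustration only; the peer address
 * and key are placeholders):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that peer instead.
 */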
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
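
/*
 * The RFC 2385 digest is computed over, in order: the pseudo-header
 * built below, the TCP header with its checksum field zeroed, the
 * segment payload (when hashing a full skb), and finally the key
 * itself; the callers chain those pieces through crypto_hash_update()
 * before crypto_hash_final().
 */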
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					struct in6_addr *daddr,
					struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif
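
/*
 * Two checksum strategies below: with CHECKSUM_PARTIAL the NIC
 * finishes the sum, so only the pseudo-header seed plus
 * csum_start/csum_offset are stored; otherwise the full sum over
 * header and payload is computed in software via csum_partial().
 */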
static void __tcp_v6_send_check(struct sk_buff *skb,
				struct in6_addr *saddr, struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}
static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
static int tcp6_gro_complete(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}
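
/*
 * Builds and sends a bare RST or ACK that is not attached to any
 * socket. Option words are laid out 32-bit aligned as elsewhere in
 * TCP: a timestamp option is padded as NOP,NOP,TIMESTAMP,len followed
 * by the two timestamp words; an MD5 option as NOP,NOP,MD5SIG,len
 * followed by the 16-byte digest.
 */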
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even when it is for a RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (!isn) {
		struct inet_peer *peer = NULL;

		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
				    &treq->rmt_addr)) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions = NULL;
		newnp->opt	  = NULL;
		newnp->mcast_oif  = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	/*
	 *	Discard frame
	 */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
{
	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
		peer = inet_getpeer_v6(&np->daddr, 1);
		*release_it = true;
	} else {
		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);
		peer = rt->rt6i_peer;
		*release_it = false;
	}

	return peer;
}
static void *tcp_v6_tw_get_peer(struct sock *sk)
{
	struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
	struct inet_timewait_sock *tw = inet_twsk(sk);

	if (tw->tw_family == AF_INET)
		return tcp_v4_tw_get_peer(sk);

	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
}
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v6_tw_get_peer,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v6_get_peer,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
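
/*
 * Each /proc/net/tcp6 row prints the addresses as four 32-bit hex
 * words taken straight from s6_addr32[], followed by the port in hex,
 * mirroring the IPv4 layout of /proc/net/tcp; timer fields are
 * reported in clock_t units.
 */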
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = &tw6->tw_v6_daddr;
	src   = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
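
/*
 * Bring-up registers, in order: the IPPROTO_TCP inet6 protocol
 * handler, the SOCK_STREAM protosw entry and the per-namespace ops;
 * the error paths and tcpv6_exit() unwind in the reverse order.
 */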
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}