/*
 *	Linux INET6 implementation
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	return NULL;
}
#endif
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
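
/*
 * Both helpers mirror their IPv4 counterparts: tcp_v6_check() folds the
 * IPv6 pseudo-header into the TCP checksum via csum_ipv6_magic(), and
 * tcp_v6_init_sequence() derives the initial sequence number for an
 * incoming SYN from the packet's address and port pairs.
 */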
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)

	if (usin->sin6_family != AF_INET6)
		return(-EAFNOSUPPORT);

	memset(&fl, 0, sizeof(fl));

	fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
	IP6_ECN_flow_init(fl.fl6_flowlabel);
	if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
		struct ip6_flowlabel *flowlabel;
		flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
		if (flowlabel == NULL)

		ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
		fl6_sock_release(flowlabel);

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)

			sk->sk_bound_dev_if = usin->sin6_scope_id;

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;
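
	/*
	 * An IPv4-mapped destination (::ffff:a.b.c.d) is handled by temporarily
	 * switching the socket to the "ipv6_mapped" operations and delegating
	 * to tcp_v4_connect(); if that fails, the native IPv6 operations are
	 * restored before the error is returned.
	 */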
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}
	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT)) < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
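
	/*
	 * The clamp above keeps the advertised MSS within the guaranteed IPv6
	 * minimum MTU: 1280 (IPV6_MIN_MTU) - 40 (IPv6 header) - 20 (TCP
	 * header) = 1220 bytes of payload.
	 */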
	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);

	tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
						     np->daddr.s6_addr32,
						     inet->sport,
						     inet->dport);

	err = tcp_connect(sk);

late_failure:
	tcp_set_state(sk, TCP_CLOSE);

failure:
	sk->sk_route_caps = 0;
	return err;
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			  th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));

	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)

	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))

		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

			struct inet_sock *inet = inet_sk(sk);

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
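
	/*
	 * Any other ICMPv6 error is converted to an errno value below; it is
	 * reported immediately when the socket is not locked by the user,
	 * otherwise it is parked in sk_err_soft for later delivery.
	 */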
	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);

		inet_csk_reqsk_queue_drop(sk, req, prev);

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

		sk->sk_err_soft = err;

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_error_report(sk);

	sk->sk_err_soft = err;
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct dst_entry *dst;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;
	security_req_classify_flow(req, &fl);

	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);

	err = ip6_dst_lookup(sk, &dst, &fl);

		ipv6_addr_copy(&fl.fl6_dst, final_p);
	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)

	skb = tcp_make_synack(sk, dst, req);

		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);

	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts)
		kfree_skb(inet6_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {

			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool() == NULL) {

		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

				tcp_free_md5sig_pool();

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}
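
/*
 * tcp_v6_md5_do_add() above grows the IPv6 key array one entry at a time:
 * when alloced6 == entries6 the keys6[] vector is reallocated with room for
 * one more key and the old entries are copied over before the new
 * <address, key> pair is appended.
 */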
static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)

		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);

		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					struct in6_addr *daddr,
					struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected, NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       genhash ? "failed" : "mismatch",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_send_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
};
#endif

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial((char *)th, th->doff<<2,
							 skb->csum));
	}
}
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;

	if (!pskb_may_pull(skb, sizeof(*th)))

	ipv6h = ipv6_hdr(skb);

	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct net *net = dev_net(skb->dst->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
	else
		key = NULL;

	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
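
	/*
	 * When a matching MD5 key exists, the RST must carry a TCP MD5
	 * signature option, so the header length grows by the 4-byte-aligned
	 * option size (TCPOLEN_MD5SIG_ALIGNED).
	 */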
	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */
	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;

		t1->seq = th->ack_seq;

		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff<<2));

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		__be32 *opt = (__be32 *)(t1 + 1);
		opt[0] = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) |
			       TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)&opt[1], key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* Pass a socket to ip6_dst_lookup either it is for RST.
	 * Underlying function will use this to retrieve the network
	 * namespace.
	 */
	if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct net *net = dev_net(skb->dst->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}
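
	/*
	 * The three words above are the standard timestamp option layout:
	 * two NOPs for alignment, the TIMESTAMP kind/length byte pair, then
	 * TSval (our clock) and TSecr (the peer's echoed timestamp, "ts").
	 */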
#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(buff);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
			req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {

		inet_twsk_put(inet_twsk(nsk));
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v6_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		isn = tcp_v6_init_sequence(skb);
	}

	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req))

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);

	return 0; /* don't send reset */
}
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
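
/*
 * The 76-byte cut-off in tcp_v6_checksum_init() above matches the equivalent
 * check in the IPv4 receive path: very short segments have their checksum
 * verified immediately rather than deferred until the data is copied.
 */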
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;

ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	if (opt_skb)
		kfree_skb(opt_skb);
	return 0;
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	bh_lock_sock_nested(sk);

	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = get_softnet_dma();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);

	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}
static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_clamp = ~0;
	/* Classic default MSS (576 - 40); refined once the route's MTU is known. */
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,    /* non standard timer */
		   0,    /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {

			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
	return 0;
}

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.show		= tcp6_seq_show,
};

int tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
	.owner			= THIS_MODULE,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};

static struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
	inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init = tcpv6_net_init,
	.exit = tcpv6_net_exit,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;

	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);

	inet6_unregister_protosw(&tcpv6_protosw);

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}