/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sk_buff *skb,
				      struct request_sock *req);
static void	tcp_v6_send_check(struct sock *sk, int len,
				  struct sk_buff *skb);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#endif
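
/* Hash a TCPv6 socket into the lookup tables; IPv4-mapped sockets are
 * handed to the IPv4 hash routine instead. */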
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk);
		local_bh_enable();
	}
}
static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
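
/* Pick the initial sequence number for an outgoing SYN from the
 * address/port 4-tuple, so unrelated connections get unrelated ISNs. */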
static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
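
/* Active open: validate the destination (flow labels, scope ids,
 * IPv4-mapped addresses), pick a source address and route, then hand
 * off to tcp_connect() to emit the SYN. */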
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return(-EAFNOSUPPORT);

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT)) < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
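
/* ICMPv6 error handler: find the socket the error refers to, handle
 * PMTU discovery (PKT_TOOBIG) and report other errors to the owner. */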
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	struct tcp_sock *tp;
	__u32 seq;
	int err;

	sk = inet6_lookup(dev_net(skb->dev), &tcp_hashinfo, &hdr->daddr,
			  th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}
		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
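
/* Build and transmit the SYN+ACK for a pending connection request. */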
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;
	security_req_classify_flow(req, &fl);

	opt = np->opt;
	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto done;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);
	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto done;

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	return err;
}
static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts)
		kfree_skb(inet6_rsk(req)->pktopts);
}
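
/* TCP-MD5 (RFC 2385) support: per-peer key management plus signature
 * computation and verification for the IPv6 side. */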
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool() == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}
static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   struct tcphdr *th, int protocol,
				   unsigned int tcplen)
{
	struct scatterlist sg[4];
	__u16 data_len;
	int block = 0;
	__sum16 cksum;
	struct tcp_md5sig_pool *hp;
	struct tcp6_pseudohdr *bp;
	struct hash_desc *desc;
	int err;
	unsigned int nbytes = 0;

	hp = tcp_get_md5sig_pool();
	if (!hp) {
		printk(KERN_WARNING "%s(): hash pool not found...\n", __func__);
		goto clear_hash_noput;
	}
	bp = &hp->md5_blk.ip6;
	desc = &hp->md5_desc;

	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->len = htonl(tcplen);
	bp->protocol = htonl(protocol);

	sg_init_table(sg, 4);

	sg_set_buf(&sg[block++], bp, sizeof(*bp));
	nbytes += sizeof(*bp);

	/* 2. TCP header, excluding options */
	cksum = th->check;
	th->check = 0;
	sg_set_buf(&sg[block++], th, sizeof(*th));
	nbytes += sizeof(*th);

	/* 3. TCP segment data (if any) */
	data_len = tcplen - (th->doff << 2);
	if (data_len > 0) {
		u8 *data = (u8 *)th + (th->doff << 2);
		sg_set_buf(&sg[block++], data, data_len);
		nbytes += data_len;
	}

	/* 4. shared key */
	sg_set_buf(&sg[block++], key->key, key->keylen);
	nbytes += key->keylen;

	sg_mark_end(&sg[block - 1]);

	/* Now store the hash into the packet */
	err = crypto_hash_init(desc);
	if (err) {
		printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
		goto clear_hash;
	}
	err = crypto_hash_update(desc, sg, nbytes);
	if (err) {
		printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
		goto clear_hash;
	}
	err = crypto_hash_final(desc, md5_hash);
	if (err) {
		printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
		goto clear_hash;
	}

	/* Reset header, and free up the crypto */
	tcp_put_md5sig_pool();
	th->check = cksum;
out:
	return 0;
clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	goto out;
}
static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				struct sock *sk,
				struct dst_entry *dst,
				struct request_sock *req,
				struct tcphdr *th, int protocol,
				unsigned int tcplen)
{
	struct in6_addr *saddr, *daddr;

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	}
	return tcp_v6_do_calc_md5_hash(md5_hash, key,
				       saddr, daddr,
				       th, protocol, tcplen);
}
static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int length = (th->doff << 2) - sizeof (*th);
	int genhash;
	u8 *ptr;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);

	/* If the TCP option is too short, we can short cut */
	if (length < TCPOLEN_MD5SIG)
		return hash_expected ? 1 : 0;

	/* parse options */
	ptr = (u8 *)(th + 1);
	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			goto done_opts;
		case TCPOPT_NOP:
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2 || opsize > length)
				goto done_opts;
			if (opcode == TCPOPT_MD5SIG) {
				hash_location = ptr;
				goto done_opts;
			}
		}
		ptr += opsize - 2;
		length -= opsize;
	}

done_opts:
	/* do we have a hash as expected? */
	if (!hash_expected) {
		if (!hash_location)
			return 0;
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash NOT expected but found "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}

	if (!hash_location) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash expected but NOT found "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_do_calc_md5_hash(newhash,
					  hash_expected,
					  &ip6h->saddr, &ip6h->daddr,
					  th, sk->sk_protocol,
					  skb->len);
	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       genhash ? "failed" : "mismatch",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_send_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
};
#endif

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
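
/* Fill in the TCP checksum of an outgoing segment, offloading the final
 * fold to the device via CHECKSUM_PARTIAL when possible. */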
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial((char *)th, th->doff<<2,
							 skb->csum));
	}
}
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}
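
/* Send an RST in reply to a segment that hit no usable socket, echoing
 * (or acknowledging) the offending sequence numbers and, when configured,
 * signing the reset with the peer's MD5 key. */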
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi fl;
	struct net *net = dev_net(skb->dst->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(*th);
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
	else
		key = NULL;

	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->rst = 1;

	if (th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff<<2));
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		__be32 *opt = (__be32 *)(t1 + 1);
		opt[0] = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) |
			       TCPOLEN_MD5SIG);
		tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key,
					&ipv6_hdr(skb)->daddr,
					&ipv6_hdr(skb)->saddr,
					t1, IPPROTO_TCP, tot_len);
	}
#endif

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}
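
/* Send a bare ACK, used for the TIME_WAIT state and to answer a pending
 * request sock; optionally carries timestamp and MD5 options. */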
static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
			    struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi fl;
	struct net *net = dev_net(skb->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	__be32 *topt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_key tw_key;
#endif

#ifdef CONFIG_TCP_MD5SIG
	if (!tw && skb->sk) {
		key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr);
	} else if (tw && tw->tw_md5_keylen) {
		tw_key.key = tw->tw_md5_key;
		tw_key.keylen = tw->tw_md5_keylen;
		key = &tw_key;
	} else
		key = NULL;
#endif

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len/4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
					&ipv6_hdr(skb)->daddr,
					&ipv6_hdr(skb)->saddr,
					t1, IPPROTO_TCP, tot_len);
	}
#endif

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(buff);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
}
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
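
/* Handle an incoming SYN on a listener: allocate a request sock, record
 * the peer's addresses and options, pick an ISN (or a SYN cookie) and
 * reply with a SYN+ACK. */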
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie && !tmp_opt.saw_tstamp) {
		tcp_clear_options(&tmp_opt);
		tmp_opt.saw_tstamp = 0;
	}

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	treq->pktopts = NULL;
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v6_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		isn = tcp_v6_init_sequence(skb);
	}

	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req))
		goto drop;

	if (!want_cookie) {
		inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		return 0;
	}

drop:
	if (req)
		reqsk_free(req);

	return 0; /* don't send reset */
}
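
/* Create the child socket once the three-way handshake completes, copying
 * addresses, IPv6 options and (optionally) the MD5 key from the listener
 * and the request sock. */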
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(newsk);
	inet_inherit_port(sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	if (opt_skb)
		kfree_skb(opt_skb);
	return 0;
}
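
/* Main receive path: validate the header and checksum, look the socket up
 * in the hash tables and hand the segment to the owner (directly, via the
 * prequeue, or via the backlog), with separate handling for TIME_WAIT. */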
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup(dev_net(skb->dev), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest),
			inet6_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = get_softnet_dma();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}

static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_calc_md5_hash,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_calc_md5_hash,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}
static int tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0,0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   icsk->icsk_rto,
		   icsk->icsk_ack.ato,
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct file_operations tcp6_seq_fops;
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_show	= tcp6_seq_show,
	.seq_fops	= &tcp6_seq_fops,
};

int tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};

static struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.capability	=	-1,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int tcpv6_net_init(struct net *net)
{
	int err;
	struct socket *sock;
	struct sock *sk;

	err = inet_ctl_sock_create(&sock, PF_INET6, SOCK_RAW, IPPROTO_TCP);
	if (err)
		return err;

	net->ipv6.tcp_sk = sk = sock->sk;
	sk_change_net(sk, net);
	return err;
}

static void tcpv6_net_exit(struct net *net)
{
	sk_release_kernel(net->ipv6.tcp_sk);
}

static struct pernet_operations tcpv6_net_ops = {
	.init = tcpv6_net_init,
	.exit = tcpv6_net_exit,
};
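
/* Module init/exit: register the protocol handler, the protosw entry and
 * the per-namespace control socket, unwinding in reverse order on error. */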
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}