/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
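
/* Generate the initial sequence number for an outgoing connection from the
 * address/port 4-tuple via the keyed hash in secure_tcpv6_sequence_number(),
 * which makes ISNs hard for off-path attackers to predict.
 */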
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
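
/* Active open: validate the destination address, fall back to
 * tcp_v4_connect() for v4-mapped destinations, look up a route and a source
 * address, hash the socket, and send the SYN via tcp_connect().
 */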
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	ip6_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
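
/* Called after an ICMPV6_PKT_TOOBIG notification lowered the path MTU:
 * revalidate the cached route, shrink the MSS, and retransmit so the
 * connection recovers without waiting for an RTO.
 */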
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
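
/* ICMPv6 error handler for TCP: locate the socket the quoted header refers
 * to, then dispatch on the error type (redirect, packet-too-big, or a hard
 * error converted via icmpv6_err_convert()).
 */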
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
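
/* Transmit a SYN-ACK for a pending connection request, routing it with the
 * request's flow and checksumming against the IPv6 pseudo-header.
 */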
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
	return false;
}
#endif
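
/* Fill the IPv6-specific fields of a freshly minted request sock from the
 * incoming SYN, and pin the skb if the listener asked for packet options.
 */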
static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req);
}
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
};
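
/* Build and send a bare ACK or RST on the per-netns control socket. All
 * header fields come from the packet being answered, with the send and
 * receive directions swapped; tcp_v6_send_reset() and tcp_v6_send_ack()
 * are thin wrappers around this.
 */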
static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even when it is for a RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key, and no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
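
/* For a segment hitting a listening socket: match it against pending
 * connection requests, then against established sockets created in the
 * meantime, and finally (for non-SYN segments) against a SYN cookie.
 */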
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct request_sock *req;
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
	if (req) {
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk)
			reqsk_put(req);
		return nsk;
	}
	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
					 tcp_v6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}
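
/* Create the child socket once the 3-way handshake completes. The
 * ETH_P_IP branch handles connections from v4-mapped addresses by creating
 * an IPv4 child and re-pointing its af_ops at ipv6_mapped.
 */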
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = tcp_v6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	ip6_set_txhash(newsk);

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: we reattach optmem
	   to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS does not
	   look very well thought out. For now we latch options,
	   received in the last packet, enqueued by tcp.
	   Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
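
/* Main receive entry point, called for every TCP segment delivered to the
 * IPv6 stack: validate the header and checksum, look up the owning socket,
 * and either process the segment directly, prequeue it, or backlog it.
 */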
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_incoming_cpu_update(sk);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw);
			inet_twsk_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
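
/* Early demux: look up an established socket right after IP receive so the
 * socket and its cached rx dst can be attached to the skb before routing.
 */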
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct request_sock *req, int i, kuid_t uid)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   sp->sk_state == TCP_LISTEN ?
			(fastopenq ? fastopenq->max_qlen : 0) :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
		else
			get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, v, st->num, st->uid);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
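
/* The proto table that plugs TCPv6 into the generic socket layer. */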
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}