[PATCH] uml: add dependency to arch/um/Makefile for parallel builds
[linux/fpc-iii.git] / net / ipv6 / tcp_ipv6.c
blobf6e288dc116ede93c2f755075c641303ca4bca47
1 /*
2 * TCP over IPv6
3 * Linux INET6 implementation
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
10 * Based on:
11 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c
13 * linux/net/ipv4/tcp_output.c
15 * Fixes:
16 * Hideaki YOSHIFUJI : sin6_scope_id support
17 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
18 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
19 * a single port at the same time.
20 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
28 #include <linux/module.h>
29 #include <linux/config.h>
30 #include <linux/errno.h>
31 #include <linux/types.h>
32 #include <linux/socket.h>
33 #include <linux/sockios.h>
34 #include <linux/net.h>
35 #include <linux/jiffies.h>
36 #include <linux/in.h>
37 #include <linux/in6.h>
38 #include <linux/netdevice.h>
39 #include <linux/init.h>
40 #include <linux/jhash.h>
41 #include <linux/ipsec.h>
42 #include <linux/times.h>
44 #include <linux/ipv6.h>
45 #include <linux/icmpv6.h>
46 #include <linux/random.h>
48 #include <net/tcp.h>
49 #include <net/ndisc.h>
50 #include <net/ipv6.h>
51 #include <net/transp_v6.h>
52 #include <net/addrconf.h>
53 #include <net/ip6_route.h>
54 #include <net/ip6_checksum.h>
55 #include <net/inet_ecn.h>
56 #include <net/protocol.h>
57 #include <net/xfrm.h>
58 #include <net/addrconf.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
62 #include <asm/uaccess.h>
64 #include <linux/proc_fs.h>
65 #include <linux/seq_file.h>
67 static void tcp_v6_send_reset(struct sk_buff *skb);
68 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
69 static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
70 struct sk_buff *skb);
72 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
73 static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok);
75 static struct tcp_func ipv6_mapped;
76 static struct tcp_func ipv6_specific;
78 /* I have no idea if this is a good hash for v6 or not. -DaveM */
/*
 * Hash an IPv6 4-tuple into an established-hash slot.  Only the low
 * 32 bits of each address are mixed in; the final mask implies
 * tcp_ehash_size is a power of two.
 */
79 static __inline__ int tcp_v6_hashfn(struct in6_addr *laddr, u16 lport,
80 struct in6_addr *faddr, u16 fport)
82 int hashent = (lport ^ fport);
84 hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
85 hashent ^= hashent>>16;
86 hashent ^= hashent>>8;
87 return (hashent & (tcp_ehash_size - 1));
/*
 * Established-hash slot for a connected socket, computed from its own
 * 4-tuple (local rcv_saddr/num, remote daddr/dport).
 */
90 static __inline__ int tcp_v6_sk_hashfn(struct sock *sk)
92 struct inet_sock *inet = inet_sk(sk);
93 struct ipv6_pinfo *np = inet6_sk(sk);
94 struct in6_addr *laddr = &np->rcv_saddr;
95 struct in6_addr *faddr = &np->daddr;
96 __u16 lport = inet->num;
97 __u16 fport = inet->dport;
98 return tcp_v6_hashfn(laddr, lport, faddr, fport);
/*
 * Return non-zero if binding sk to tb's port would conflict with an
 * existing owner: the two sockets share (or don't restrict) a device,
 * at least one lacks SO_REUSEADDR (or the other is listening), and
 * their receive addresses overlap.
 */
101 static inline int tcp_v6_bind_conflict(struct sock *sk,
102 struct tcp_bind_bucket *tb)
104 struct sock *sk2;
105 struct hlist_node *node;
107 /* We must walk the whole port owner list in this case. -DaveM */
108 sk_for_each_bound(sk2, node, &tb->owners) {
109 if (sk != sk2 &&
110 (!sk->sk_bound_dev_if ||
111 !sk2->sk_bound_dev_if ||
112 sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
113 (!sk->sk_reuse || !sk2->sk_reuse ||
114 sk2->sk_state == TCP_LISTEN) &&
115 ipv6_rcv_saddr_equal(sk, sk2))
116 break;
/* node is non-NULL iff the walk stopped on a conflicting socket */
119 return node != NULL;
122 /* Grrr, addr_type already calculated by caller, but I don't want
123 * to add some silly "cookie" argument to this method just for that.
124 * But it doesn't matter, the recalculation is in the rarest path
125 * this function ever takes.
*
* Obtain a local port for sk: if snum == 0, search the ephemeral
* range starting from the global rover for a port with no bind bucket;
* otherwise look up (or create) the bucket for snum and check for
* bind conflicts.  Returns 0 on success, 1 on failure.
*/
127 static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
129 struct tcp_bind_hashbucket *head;
130 struct tcp_bind_bucket *tb;
131 struct hlist_node *node;
132 int ret;
134 local_bh_disable();
135 if (snum == 0) {
136 int low = sysctl_local_port_range[0];
137 int high = sysctl_local_port_range[1];
138 int remaining = (high - low) + 1;
139 int rover;
/* rover walk is serialized by tcp_portalloc_lock */
141 spin_lock(&tcp_portalloc_lock);
142 if (tcp_port_rover < low)
143 rover = low;
144 else
145 rover = tcp_port_rover;
146 do { rover++;
147 if (rover > high)
148 rover = low;
149 head = &tcp_bhash[tcp_bhashfn(rover)];
150 spin_lock(&head->lock);
151 tb_for_each(tb, node, &head->chain)
152 if (tb->port == rover)
153 goto next;
/* no bucket for this port: take it (head->lock kept held) */
154 break;
155 next:
156 spin_unlock(&head->lock);
157 } while (--remaining > 0);
158 tcp_port_rover = rover;
159 spin_unlock(&tcp_portalloc_lock);
161 /* Exhausted local port range during search? */
162 ret = 1;
163 if (remaining <= 0)
164 goto fail;
166 /* OK, here is the one we will use. */
167 snum = rover;
168 } else {
169 head = &tcp_bhash[tcp_bhashfn(snum)];
170 spin_lock(&head->lock);
171 tb_for_each(tb, node, &head->chain)
172 if (tb->port == snum)
173 goto tb_found;
175 tb = NULL;
176 goto tb_not_found;
177 tb_found:
178 if (tb && !hlist_empty(&tb->owners)) {
/* fastreuse short-circuits the owner walk for pure-reuse buckets */
179 if (tb->fastreuse > 0 && sk->sk_reuse &&
180 sk->sk_state != TCP_LISTEN) {
181 goto success;
182 } else {
183 ret = 1;
184 if (tcp_v6_bind_conflict(sk, tb))
185 goto fail_unlock;
188 tb_not_found:
189 ret = 1;
190 if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
191 goto fail_unlock;
192 if (hlist_empty(&tb->owners)) {
193 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
194 tb->fastreuse = 1;
195 else
196 tb->fastreuse = 0;
197 } else if (tb->fastreuse &&
198 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
199 tb->fastreuse = 0;
201 success:
202 if (!tcp_sk(sk)->bind_hash)
203 tcp_bind_hash(sk, tb, snum);
204 BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
205 ret = 0;
207 fail_unlock:
208 spin_unlock(&head->lock);
209 fail:
210 local_bh_enable();
211 return ret;
/*
 * Insert an unhashed socket into the listening hash (if TCP_LISTEN)
 * or the established hash, taking the appropriate write lock.
 * NOTE(review): callers appear responsible for disabling BHs first
 * (see tcp_v6_hash below) — confirm before reuse.
 */
214 static __inline__ void __tcp_v6_hash(struct sock *sk)
216 struct hlist_head *list;
217 rwlock_t *lock;
219 BUG_TRAP(sk_unhashed(sk));
221 if (sk->sk_state == TCP_LISTEN) {
222 list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
223 lock = &tcp_lhash_lock;
224 tcp_listen_wlock();
225 } else {
226 sk->sk_hashent = tcp_v6_sk_hashfn(sk);
227 list = &tcp_ehash[sk->sk_hashent].chain;
228 lock = &tcp_ehash[sk->sk_hashent].lock;
229 write_lock(lock);
232 __sk_add_node(sk, list);
233 sock_prot_inc_use(sk->sk_prot);
234 write_unlock(lock);
/*
 * Public hash entry point: v4-mapped sockets are delegated to the
 * IPv4 hash routine; native v6 sockets go to __tcp_v6_hash with
 * BHs disabled.  TCP_CLOSE sockets are never hashed.
 */
238 static void tcp_v6_hash(struct sock *sk)
240 if (sk->sk_state != TCP_CLOSE) {
241 struct tcp_sock *tp = tcp_sk(sk);
243 if (tp->af_specific == &ipv6_mapped) {
244 tcp_prot.hash(sk);
245 return;
247 local_bh_disable();
248 __tcp_v6_hash(sk);
249 local_bh_enable();
/*
 * Find the best listening socket for (daddr, hnum, dif).  Candidates
 * score 1 point for matching the port/family, +1 for a bound address
 * match, +1 for a bound-device match; a perfect score of 3 ends the
 * search immediately.  The returned socket (if any) is ref-held.
 */
253 static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned short hnum, int dif)
255 struct sock *sk;
256 struct hlist_node *node;
257 struct sock *result = NULL;
258 int score, hiscore;
260 hiscore=0;
261 read_lock(&tcp_lhash_lock);
262 sk_for_each(sk, node, &tcp_listening_hash[tcp_lhashfn(hnum)]) {
263 if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) {
264 struct ipv6_pinfo *np = inet6_sk(sk);
266 score = 1;
267 if (!ipv6_addr_any(&np->rcv_saddr)) {
268 if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
269 continue;
270 score++;
272 if (sk->sk_bound_dev_if) {
273 if (sk->sk_bound_dev_if != dif)
274 continue;
275 score++;
277 if (score == 3) {
278 result = sk;
279 break;
281 if (score > hiscore) {
282 hiscore = score;
283 result = sk;
287 if (result)
288 sock_hold(result);
289 read_unlock(&tcp_lhash_lock);
290 return result;
293 /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
294 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
296 * The sockhash lock must be held as a reader here.
*
* Exact-match lookup in the established hash; if nothing is found
* there, the TIME-WAIT chain (offset tcp_ehash_size past the
* established buckets) is scanned too.  A hit is returned ref-held.
299 static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u16 sport,
300 struct in6_addr *daddr, u16 hnum,
301 int dif)
303 struct tcp_ehash_bucket *head;
304 struct sock *sk;
305 struct hlist_node *node;
306 __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
307 int hash;
309 /* Optimize here for direct hit, only listening connections can
310 * have wildcards anyways.
312 hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
313 head = &tcp_ehash[hash];
314 read_lock(&head->lock);
315 sk_for_each(sk, node, &head->chain) {
316 /* For IPV6 do the cheaper port and family tests first. */
317 if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif))
318 goto hit; /* You sunk my battleship! */
320 /* Must check for a TIME_WAIT'er before going to listener hash. */
321 sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
322 /* FIXME: acme: check this... */
323 struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
/* tw_dport/tw_num compared as one 32-bit word via 'ports' */
325 if(*((__u32 *)&(tw->tw_dport)) == ports &&
326 sk->sk_family == PF_INET6) {
327 if(ipv6_addr_equal(&tw->tw_v6_daddr, saddr) &&
328 ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr) &&
329 (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif))
330 goto hit;
333 read_unlock(&head->lock);
334 return NULL;
336 hit:
337 sock_hold(sk);
338 read_unlock(&head->lock);
339 return sk;
/*
 * Demux helper: try the established/TIME-WAIT hashes first, then
 * fall back to the listener lookup.
 */
343 static inline struct sock *__tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
344 struct in6_addr *daddr, u16 hnum,
345 int dif)
347 struct sock *sk;
349 sk = __tcp_v6_lookup_established(saddr, sport, daddr, hnum, dif);
351 if (sk)
352 return sk;
354 return tcp_v6_lookup_listener(daddr, hnum, dif);
/*
 * Exported lookup wrapper: takes dport in network byte order and
 * disables BHs around the hash walk.
 */
357 inline struct sock *tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
358 struct in6_addr *daddr, u16 dport,
359 int dif)
361 struct sock *sk;
363 local_bh_disable();
364 sk = __tcp_v6_lookup(saddr, sport, daddr, ntohs(dport), dif);
365 local_bh_enable();
367 return sk;
370 EXPORT_SYMBOL_GPL(tcp_v6_lookup);
374 * Open request hash tables.
*
* SYN-queue slot for a pending connection request, keyed on the
* remote address/port and a per-listener random value (rnd), mixed
* with the jhash primitives.
377 static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
379 u32 a, b, c;
381 a = raddr->s6_addr32[0];
382 b = raddr->s6_addr32[1];
383 c = raddr->s6_addr32[2];
385 a += JHASH_GOLDEN_RATIO;
386 b += JHASH_GOLDEN_RATIO;
387 c += rnd;
388 __jhash_mix(a, b, c);
390 a += raddr->s6_addr32[3];
391 b += (u32) rport;
392 __jhash_mix(a, b, c);
394 return c & (TCP_SYNQ_HSIZE - 1);
/*
 * Walk the listener's SYN queue for the request matching the given
 * 4-tuple (and interface, when the request recorded one).  On success
 * *prevp is set to the link pointer preceding the request so the
 * caller can unlink it.
 */
397 static struct request_sock *tcp_v6_search_req(struct tcp_sock *tp,
398 struct request_sock ***prevp,
399 __u16 rport,
400 struct in6_addr *raddr,
401 struct in6_addr *laddr,
402 int iif)
404 struct listen_sock *lopt = tp->accept_queue.listen_opt;
405 struct request_sock *req, **prev;
407 for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)];
408 (req = *prev) != NULL;
409 prev = &req->dl_next) {
410 const struct tcp6_request_sock *treq = tcp6_rsk(req);
412 if (inet_rsk(req)->rmt_port == rport &&
413 req->rsk_ops->family == AF_INET6 &&
414 ipv6_addr_equal(&treq->rmt_addr, raddr) &&
415 ipv6_addr_equal(&treq->loc_addr, laddr) &&
416 (!treq->iif || treq->iif == iif)) {
417 BUG_TRAP(req->sk == NULL);
418 *prevp = prev;
419 return req;
423 return NULL;
/*
 * TCP-over-IPv6 checksum: fold the pseudo-header (saddr, daddr, len,
 * IPPROTO_TCP) into the partial sum 'base'.
 */
426 static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
427 struct in6_addr *saddr,
428 struct in6_addr *daddr,
429 unsigned long base)
431 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
/*
 * Initial sequence number for an incoming SYN: use the v6 secure-ISN
 * generator for native IPv6 frames, the v4 one for v4-mapped traffic.
 */
434 static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
436 if (skb->protocol == htons(ETH_P_IPV6)) {
437 return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
438 skb->nh.ipv6h->saddr.s6_addr32,
439 skb->h.th->dest,
440 skb->h.th->source);
441 } else {
442 return secure_tcp_sequence_number(skb->nh.iph->daddr,
443 skb->nh.iph->saddr,
444 skb->h.th->dest,
445 skb->h.th->source);
/*
 * Check that sk's 4-tuple (with local port lport) is unique in the
 * established hash, and insert it there if so.  A reusable TIME-WAIT
 * twin may be recycled: with twp it is handed back to the caller,
 * otherwise it is descheduled here.  Returns 0 on success,
 * -EADDRNOTAVAIL if the tuple is taken.
 */
449 static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
450 struct tcp_tw_bucket **twp)
452 struct inet_sock *inet = inet_sk(sk);
453 struct ipv6_pinfo *np = inet6_sk(sk);
454 struct in6_addr *daddr = &np->rcv_saddr;
455 struct in6_addr *saddr = &np->daddr;
456 int dif = sk->sk_bound_dev_if;
457 u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
458 int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport);
459 struct tcp_ehash_bucket *head = &tcp_ehash[hash];
460 struct sock *sk2;
461 struct hlist_node *node;
462 struct tcp_tw_bucket *tw;
464 write_lock(&head->lock);
466 /* Check TIME-WAIT sockets first. */
467 sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
468 tw = (struct tcp_tw_bucket*)sk2;
470 if(*((__u32 *)&(tw->tw_dport)) == ports &&
471 sk2->sk_family == PF_INET6 &&
472 ipv6_addr_equal(&tw->tw_v6_daddr, saddr) &&
473 ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr) &&
474 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
475 struct tcp_sock *tp = tcp_sk(sk);
/* Recycle only if the TW peer left a recent timestamp and either
* the caller asked for it (twp) or tw_reuse allows it. */
477 if (tw->tw_ts_recent_stamp &&
478 (!twp || (sysctl_tcp_tw_reuse &&
479 xtime.tv_sec -
480 tw->tw_ts_recent_stamp > 1))) {
481 /* See comment in tcp_ipv4.c */
482 tp->write_seq = tw->tw_snd_nxt + 65535 + 2;
483 if (!tp->write_seq)
484 tp->write_seq = 1;
485 tp->rx_opt.ts_recent = tw->tw_ts_recent;
486 tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
487 sock_hold(sk2);
488 goto unique;
489 } else
490 goto not_unique;
493 tw = NULL;
495 /* And established part... */
496 sk_for_each(sk2, node, &head->chain) {
497 if(TCP_IPV6_MATCH(sk2, saddr, daddr, ports, dif))
498 goto not_unique;
501 unique:
502 BUG_TRAP(sk_unhashed(sk));
503 __sk_add_node(sk, &head->chain);
504 sk->sk_hashent = hash;
505 sock_prot_inc_use(sk->sk_prot);
506 write_unlock(&head->lock);
508 if (twp) {
509 *twp = tw;
510 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
511 } else if (tw) {
512 /* Silly. Should hash-dance instead... */
513 tcp_tw_deschedule(tw);
514 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
516 tcp_tw_put(tw);
518 return 0;
520 not_unique:
521 write_unlock(&head->lock);
522 return -EADDRNOTAVAIL;
/*
 * Per-connection secret offset into the ephemeral port space,
 * derived from the socket's address pair and destination port.
 */
525 static inline u32 tcpv6_port_offset(const struct sock *sk)
527 const struct inet_sock *inet = inet_sk(sk);
528 const struct ipv6_pinfo *np = inet6_sk(sk);
530 return secure_tcpv6_port_ephemeral(np->rcv_saddr.s6_addr32,
531 np->daddr.s6_addr32,
532 inet->dport);
/*
 * Bind an outgoing connection to a local port and insert it into the
 * established hash.  With no port bound yet (snum == 0), search the
 * ephemeral range from a keyed offset, checking 4-tuple uniqueness as
 * we go; otherwise reuse the existing bind bucket and, unless sk is
 * its sole owner, verify uniqueness in the established hash.
 */
535 static int tcp_v6_hash_connect(struct sock *sk)
537 unsigned short snum = inet_sk(sk)->num;
538 struct tcp_bind_hashbucket *head;
539 struct tcp_bind_bucket *tb;
540 int ret;
542 if (!snum) {
543 int low = sysctl_local_port_range[0];
544 int high = sysctl_local_port_range[1];
545 int range = high - low;
546 int i;
547 int port;
/* 'hint' persists across calls so successive connects keep moving
* through the port space */
548 static u32 hint;
549 u32 offset = hint + tcpv6_port_offset(sk);
550 struct hlist_node *node;
551 struct tcp_tw_bucket *tw = NULL;
553 local_bh_disable();
554 for (i = 1; i <= range; i++) {
555 port = low + (i + offset) % range;
556 head = &tcp_bhash[tcp_bhashfn(port)];
557 spin_lock(&head->lock);
559 /* Does not bother with rcv_saddr checks,
560 * because the established check is already
561 * unique enough.
563 tb_for_each(tb, node, &head->chain) {
564 if (tb->port == port) {
565 BUG_TRAP(!hlist_empty(&tb->owners));
/* fastreuse >= 0 means normal bind() owners: skip */
566 if (tb->fastreuse >= 0)
567 goto next_port;
568 if (!__tcp_v6_check_established(sk,
569 port,
570 &tw))
571 goto ok;
572 goto next_port;
576 tb = tcp_bucket_create(head, port);
577 if (!tb) {
578 spin_unlock(&head->lock);
579 break;
/* fastreuse == -1 marks a connect()-owned bucket */
581 tb->fastreuse = -1;
582 goto ok;
584 next_port:
585 spin_unlock(&head->lock);
587 local_bh_enable();
589 return -EADDRNOTAVAIL;
592 hint += i;
594 /* Head lock still held and bh's disabled */
595 tcp_bind_hash(sk, tb, port);
596 if (sk_unhashed(sk)) {
597 inet_sk(sk)->sport = htons(port);
598 __tcp_v6_hash(sk);
600 spin_unlock(&head->lock);
602 if (tw) {
603 tcp_tw_deschedule(tw);
604 tcp_tw_put(tw);
607 ret = 0;
608 goto out;
611 head = &tcp_bhash[tcp_bhashfn(snum)];
612 tb = tcp_sk(sk)->bind_hash;
613 spin_lock_bh(&head->lock);
615 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
616 __tcp_v6_hash(sk);
617 spin_unlock_bh(&head->lock);
618 return 0;
619 } else {
620 spin_unlock(&head->lock);
621 /* No definite answer... Walk to established hash table */
622 ret = __tcp_v6_check_established(sk, snum, NULL);
623 out:
624 local_bh_enable();
625 return ret;
/* Incoming interface index recorded in the skb's IPv6 control block. */
629 static __inline__ int tcp_v6_iif(struct sk_buff *skb)
631 return IP6CB(skb)->iif;
/*
 * connect() for TCP/IPv6.  Validates the sockaddr, resolves flow
 * labels and link-local scope, hands v4-mapped destinations off to
 * tcp_v4_connect(), otherwise routes the flow, picks source address
 * and port, and sends the SYN.  Returns 0 or a negative errno.
 */
634 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
635 int addr_len)
637 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
638 struct inet_sock *inet = inet_sk(sk);
639 struct ipv6_pinfo *np = inet6_sk(sk);
640 struct tcp_sock *tp = tcp_sk(sk);
641 struct in6_addr *saddr = NULL, *final_p = NULL, final;
642 struct flowi fl;
643 struct dst_entry *dst;
644 int addr_type;
645 int err;
647 if (addr_len < SIN6_LEN_RFC2133)
648 return -EINVAL;
650 if (usin->sin6_family != AF_INET6)
651 return(-EAFNOSUPPORT);
653 memset(&fl, 0, sizeof(fl));
655 if (np->sndflow) {
656 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
657 IP6_ECN_flow_init(fl.fl6_flowlabel);
658 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
659 struct ip6_flowlabel *flowlabel;
660 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
661 if (flowlabel == NULL)
662 return -EINVAL;
/* a flow label pins the destination address */
663 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
664 fl6_sock_release(flowlabel);
669 * connect() to INADDR_ANY means loopback (BSD'ism).
672 if(ipv6_addr_any(&usin->sin6_addr))
673 usin->sin6_addr.s6_addr[15] = 0x1;
675 addr_type = ipv6_addr_type(&usin->sin6_addr);
677 if(addr_type & IPV6_ADDR_MULTICAST)
678 return -ENETUNREACH;
680 if (addr_type&IPV6_ADDR_LINKLOCAL) {
681 if (addr_len >= sizeof(struct sockaddr_in6) &&
682 usin->sin6_scope_id) {
683 /* If interface is set while binding, indices
684 * must coincide.
686 if (sk->sk_bound_dev_if &&
687 sk->sk_bound_dev_if != usin->sin6_scope_id)
688 return -EINVAL;
690 sk->sk_bound_dev_if = usin->sin6_scope_id;
693 /* Connect to link-local address requires an interface */
694 if (!sk->sk_bound_dev_if)
695 return -EINVAL;
/* Reconnecting to a different peer invalidates cached timestamps */
698 if (tp->rx_opt.ts_recent_stamp &&
699 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
700 tp->rx_opt.ts_recent = 0;
701 tp->rx_opt.ts_recent_stamp = 0;
702 tp->write_seq = 0;
705 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
706 np->flow_label = fl.fl6_flowlabel;
709 * TCP over IPv4
712 if (addr_type == IPV6_ADDR_MAPPED) {
713 u32 exthdrlen = tp->ext_header_len;
714 struct sockaddr_in sin;
716 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
718 if (__ipv6_only_sock(sk))
719 return -ENETUNREACH;
721 sin.sin_family = AF_INET;
722 sin.sin_port = usin->sin6_port;
723 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
/* Switch the socket to the v4-mapped operations vector */
725 tp->af_specific = &ipv6_mapped;
726 sk->sk_backlog_rcv = tcp_v4_do_rcv;
728 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
730 if (err) {
731 tp->ext_header_len = exthdrlen;
/* roll back to native v6 handling on failure */
732 tp->af_specific = &ipv6_specific;
733 sk->sk_backlog_rcv = tcp_v6_do_rcv;
734 goto failure;
735 } else {
736 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
737 inet->saddr);
738 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
739 inet->rcv_saddr);
742 return err;
745 if (!ipv6_addr_any(&np->rcv_saddr))
746 saddr = &np->rcv_saddr;
748 fl.proto = IPPROTO_TCP;
749 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
750 ipv6_addr_copy(&fl.fl6_src,
751 (saddr ? saddr : &np->saddr));
752 fl.oif = sk->sk_bound_dev_if;
753 fl.fl_ip_dport = usin->sin6_port;
754 fl.fl_ip_sport = inet->sport;
/* With a routing header, route via the first hop but remember the
* real destination in 'final' */
756 if (np->opt && np->opt->srcrt) {
757 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
758 ipv6_addr_copy(&final, &fl.fl6_dst);
759 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
760 final_p = &final;
763 err = ip6_dst_lookup(sk, &dst, &fl);
764 if (err)
765 goto failure;
766 if (final_p)
767 ipv6_addr_copy(&fl.fl6_dst, final_p);
769 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
770 dst_release(dst);
771 goto failure;
774 if (saddr == NULL) {
775 saddr = &fl.fl6_src;
776 ipv6_addr_copy(&np->rcv_saddr, saddr);
779 /* set the source address */
780 ipv6_addr_copy(&np->saddr, saddr);
781 inet->rcv_saddr = LOOPBACK4_IPV6;
783 ip6_dst_store(sk, dst, NULL);
784 sk->sk_route_caps = dst->dev->features &
785 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
787 tp->ext_header_len = 0;
788 if (np->opt)
789 tp->ext_header_len = np->opt->opt_flen + np->opt->opt_nflen;
791 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
793 inet->dport = usin->sin6_port;
795 tcp_set_state(sk, TCP_SYN_SENT);
796 err = tcp_v6_hash_connect(sk);
797 if (err)
798 goto late_failure;
800 if (!tp->write_seq)
801 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
802 np->daddr.s6_addr32,
803 inet->sport,
804 inet->dport);
806 err = tcp_connect(sk);
807 if (err)
808 goto late_failure;
810 return 0;
812 late_failure:
813 tcp_set_state(sk, TCP_CLOSE);
814 __sk_dst_reset(sk);
815 failure:
816 inet->dport = 0;
817 sk->sk_route_caps = 0;
818 return err;
/*
 * ICMPv6 error handler for TCP.  Locates the affected socket,
 * validates the echoed sequence number, handles PKT_TOOBIG by
 * re-syncing the MSS from the (possibly re-looked-up) route, and
 * converts other errors for delivery to the socket or to a pending
 * connection request.
 */
821 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
822 int type, int code, int offset, __u32 info)
824 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
825 struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
826 struct ipv6_pinfo *np;
827 struct sock *sk;
828 int err;
829 struct tcp_sock *tp;
830 __u32 seq;
832 sk = tcp_v6_lookup(&hdr->daddr, th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
834 if (sk == NULL) {
835 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
836 return;
839 if (sk->sk_state == TCP_TIME_WAIT) {
840 tcp_tw_put((struct tcp_tw_bucket*)sk);
841 return;
844 bh_lock_sock(sk);
845 if (sock_owned_by_user(sk))
846 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
848 if (sk->sk_state == TCP_CLOSE)
849 goto out;
851 tp = tcp_sk(sk);
852 seq = ntohl(th->seq);
/* the echoed sequence must fall inside the unacked window */
853 if (sk->sk_state != TCP_LISTEN &&
854 !between(seq, tp->snd_una, tp->snd_nxt)) {
855 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
856 goto out;
859 np = inet6_sk(sk);
861 if (type == ICMPV6_PKT_TOOBIG) {
862 struct dst_entry *dst = NULL;
864 if (sock_owned_by_user(sk))
865 goto out;
866 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
867 goto out;
869 /* icmp should have updated the destination cache entry */
870 dst = __sk_dst_check(sk, np->dst_cookie);
872 if (dst == NULL) {
873 struct inet_sock *inet = inet_sk(sk);
874 struct flowi fl;
876 /* BUGGG_FUTURE: Again, it is not clear how
877 to handle rthdr case. Ignore this complexity
878 for now.
880 memset(&fl, 0, sizeof(fl));
881 fl.proto = IPPROTO_TCP;
882 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
883 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
884 fl.oif = sk->sk_bound_dev_if;
885 fl.fl_ip_dport = inet->dport;
886 fl.fl_ip_sport = inet->sport;
888 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
889 sk->sk_err_soft = -err;
890 goto out;
893 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
894 sk->sk_err_soft = -err;
895 goto out;
898 } else
899 dst_hold(dst);
901 if (tp->pmtu_cookie > dst_mtu(dst)) {
902 tcp_sync_mss(sk, dst_mtu(dst));
903 tcp_simple_retransmit(sk);
904 } /* else let the usual retransmit timer handle it */
905 dst_release(dst);
906 goto out;
909 icmpv6_err_convert(type, code, &err);
911 /* Might be for an request_sock */
912 switch (sk->sk_state) {
913 struct request_sock *req, **prev;
914 case TCP_LISTEN:
915 if (sock_owned_by_user(sk))
916 goto out;
918 req = tcp_v6_search_req(tp, &prev, th->dest, &hdr->daddr,
919 &hdr->saddr, tcp_v6_iif(skb));
920 if (!req)
921 goto out;
923 /* ICMPs are not backlogged, hence we cannot get
924 * an established socket here.
926 BUG_TRAP(req->sk == NULL);
928 if (seq != tcp_rsk(req)->snt_isn) {
929 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
930 goto out;
933 tcp_synq_drop(sk, req, prev);
934 goto out;
936 case TCP_SYN_SENT:
937 case TCP_SYN_RECV: /* Cannot happen.
938 It can, it SYNs are crossed. --ANK */
939 if (!sock_owned_by_user(sk)) {
940 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
941 sk->sk_err = err;
942 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
944 tcp_done(sk);
945 } else
946 sk->sk_err_soft = err;
947 goto out;
950 if (!sock_owned_by_user(sk) && np->recverr) {
951 sk->sk_err = err;
952 sk->sk_error_report(sk);
953 } else
954 sk->sk_err_soft = err;
956 out:
957 bh_unlock_sock(sk);
958 sock_put(sk);
/*
 * Build and transmit a SYN+ACK for a pending request.  If no dst was
 * supplied, route the reply (honoring any routing header from the
 * request's saved pktopts).  Returns 0 on success, negative on error;
 * always releases dst and any locally-built option block.
 */
962 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
963 struct dst_entry *dst)
965 struct tcp6_request_sock *treq = tcp6_rsk(req);
966 struct ipv6_pinfo *np = inet6_sk(sk);
967 struct sk_buff * skb;
968 struct ipv6_txoptions *opt = NULL;
969 struct in6_addr * final_p = NULL, final;
970 struct flowi fl;
971 int err = -1;
973 memset(&fl, 0, sizeof(fl));
974 fl.proto = IPPROTO_TCP;
975 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
976 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
977 fl.fl6_flowlabel = 0;
978 fl.oif = treq->iif;
979 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
980 fl.fl_ip_sport = inet_sk(sk)->sport;
982 if (dst == NULL) {
983 opt = np->opt;
984 if (opt == NULL &&
985 np->rxopt.bits.srcrt == 2 &&
986 treq->pktopts) {
987 struct sk_buff *pktopts = treq->pktopts;
988 struct inet6_skb_parm *rxopt = IP6CB(pktopts);
989 if (rxopt->srcrt)
990 opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(pktopts->nh.raw + rxopt->srcrt));
/* route via the routing header's first hop, keep real dst in 'final' */
993 if (opt && opt->srcrt) {
994 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
995 ipv6_addr_copy(&final, &fl.fl6_dst);
996 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
997 final_p = &final;
1000 err = ip6_dst_lookup(sk, &dst, &fl);
1001 if (err)
1002 goto done;
1003 if (final_p)
1004 ipv6_addr_copy(&fl.fl6_dst, final_p);
1005 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1006 goto done;
1009 skb = tcp_make_synack(sk, dst, req);
1010 if (skb) {
1011 struct tcphdr *th = skb->h.th;
1013 th->check = tcp_v6_check(th, skb->len,
1014 &treq->loc_addr, &treq->rmt_addr,
1015 csum_partial((char *)th, skb->len, skb->csum));
1017 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1018 err = ip6_xmit(sk, skb, &fl, opt, 0);
1019 if (err == NET_XMIT_CN)
1020 err = 0;
1023 done:
1024 dst_release(dst);
/* only free option blocks we built here, never the socket's own */
1025 if (opt && opt != np->opt)
1026 sock_kfree_s(sk, opt, opt->tot_len);
1027 return err;
/* Drop the reference to the saved packet-options skb, if any. */
1030 static void tcp_v6_reqsk_destructor(struct request_sock *req)
1032 if (tcp6_rsk(req)->pktopts)
1033 kfree_skb(tcp6_rsk(req)->pktopts);
/* Request-socket operations vector for TCP/IPv6 connection requests. */
1036 static struct request_sock_ops tcp6_request_sock_ops = {
1037 .family = AF_INET6,
1038 .obj_size = sizeof(struct tcp6_request_sock),
1039 .rtx_syn_ack = tcp_v6_send_synack,
1040 .send_ack = tcp_v6_reqsk_send_ack,
1041 .destructor = tcp_v6_reqsk_destructor,
1042 .send_reset = tcp_v6_send_reset
/*
 * Return 1 if the skb carries any IPv6 option the socket asked to
 * receive (hop-by-hop, flow info, routing header, destination opts).
 */
1045 static int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
1047 struct ipv6_pinfo *np = inet6_sk(sk);
1048 struct inet6_skb_parm *opt = IP6CB(skb);
1050 if (np->rxopt.all) {
1051 if ((opt->hop && np->rxopt.bits.hopopts) ||
1052 ((IPV6_FLOWINFO_MASK&*(u32*)skb->nh.raw) &&
1053 np->rxopt.bits.rxflow) ||
1054 (opt->srcrt && np->rxopt.bits.srcrt) ||
1055 ((opt->dst1 || opt->dst0) && np->rxopt.bits.dstopts))
1056 return 1;
1058 return 0;
/*
 * Fill in the TCP checksum for an outgoing segment: with hardware
 * checksumming (CHECKSUM_HW) only the pseudo-header sum is stored and
 * the device finishes the job; otherwise compute the full checksum.
 */
1062 static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
1063 struct sk_buff *skb)
1065 struct ipv6_pinfo *np = inet6_sk(sk);
1067 if (skb->ip_summed == CHECKSUM_HW) {
1068 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
1069 skb->csum = offsetof(struct tcphdr, check);
1070 } else {
1071 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
1072 csum_partial((char *)th, th->doff<<2,
1073 skb->csum));
/*
 * Send a RST in response to the given segment.  Never resets a RST,
 * and only replies to unicast destinations.  The reply swaps the
 * ports/addresses of the incoming segment and is routed without a
 * socket context.
 */
1078 static void tcp_v6_send_reset(struct sk_buff *skb)
1080 struct tcphdr *th = skb->h.th, *t1;
1081 struct sk_buff *buff;
1082 struct flowi fl;
1084 if (th->rst)
1085 return;
1087 if (!ipv6_unicast_destination(skb))
1088 return;
1091 * We need to grab some memory, and put together an RST,
1092 * and then put it into the queue to be sent.
1095 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
1096 GFP_ATOMIC);
1097 if (buff == NULL)
1098 return;
1100 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));
1102 t1 = (struct tcphdr *) skb_push(buff,sizeof(struct tcphdr));
1104 /* Swap the send and the receive. */
1105 memset(t1, 0, sizeof(*t1));
1106 t1->dest = th->source;
1107 t1->source = th->dest;
1108 t1->doff = sizeof(*t1)/4;
1109 t1->rst = 1;
1111 if(th->ack) {
1112 t1->seq = th->ack_seq;
1113 } else {
1114 t1->ack = 1;
1115 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
1116 + skb->len - (th->doff<<2));
1119 buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
1121 memset(&fl, 0, sizeof(fl));
1122 ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
1123 ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
1125 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1126 sizeof(*t1), IPPROTO_TCP,
1127 buff->csum);
1129 fl.proto = IPPROTO_TCP;
1130 fl.oif = tcp_v6_iif(skb);
1131 fl.fl_ip_dport = t1->dest;
1132 fl.fl_ip_sport = t1->source;
1134 /* sk = NULL, but it is safe for now. RST socket required. */
1135 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
1137 if ((xfrm_lookup(&buff->dst, &fl, NULL, 0)) < 0) {
1138 dst_release(buff->dst);
1139 return;
1142 ip6_xmit(NULL, buff, &fl, NULL, 0);
1143 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1144 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1145 return;
/* routing failed: drop the reply */
1148 kfree_skb(buff);
/*
 * Send a bare ACK (used for TIME-WAIT and request-socket replies) by
 * swapping the incoming segment's addresses and ports.  When ts is
 * non-zero, a TCP timestamp option (12 bytes: NOP,NOP,TS) is added.
 */
1151 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
1153 struct tcphdr *th = skb->h.th, *t1;
1154 struct sk_buff *buff;
1155 struct flowi fl;
1156 int tot_len = sizeof(struct tcphdr);
1158 if (ts)
1159 tot_len += 3*4;
1161 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1162 GFP_ATOMIC);
1163 if (buff == NULL)
1164 return;
1166 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1168 t1 = (struct tcphdr *) skb_push(buff,tot_len);
1170 /* Swap the send and the receive. */
1171 memset(t1, 0, sizeof(*t1));
1172 t1->dest = th->source;
1173 t1->source = th->dest;
1174 t1->doff = tot_len/4;
1175 t1->seq = htonl(seq);
1176 t1->ack_seq = htonl(ack);
1177 t1->ack = 1;
1178 t1->window = htons(win);
1180 if (ts) {
1181 u32 *ptr = (u32*)(t1 + 1);
1182 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1183 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1184 *ptr++ = htonl(tcp_time_stamp);
1185 *ptr = htonl(ts);
1188 buff->csum = csum_partial((char *)t1, tot_len, 0);
1190 memset(&fl, 0, sizeof(fl));
1191 ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
1192 ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
1194 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1195 tot_len, IPPROTO_TCP,
1196 buff->csum);
1198 fl.proto = IPPROTO_TCP;
1199 fl.oif = tcp_v6_iif(skb);
1200 fl.fl_ip_dport = t1->dest;
1201 fl.fl_ip_sport = t1->source;
1203 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
1204 if ((xfrm_lookup(&buff->dst, &fl, NULL, 0)) < 0) {
1205 dst_release(buff->dst);
1206 return;
1208 ip6_xmit(NULL, buff, &fl, NULL, 0);
1209 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1210 return;
/* routing failed: drop the reply */
1213 kfree_skb(buff);
/* ACK a segment arriving for a TIME-WAIT socket, then drop the ref. */
1216 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1218 struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
1220 tcp_v6_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
1221 tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
1223 tcp_tw_put(tw);
/* ACK on behalf of a pending (not yet accepted) connection request. */
1226 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1228 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
/*
 * For a segment hitting a listening socket: match it against pending
 * connection requests, then against established/TIME-WAIT sockets
 * (handles packets racing with the 3WHS completion).  Returns the
 * socket to process the segment on, or NULL to drop.
 */
1232 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1234 struct request_sock *req, **prev;
1235 struct tcphdr *th = skb->h.th;
1236 struct tcp_sock *tp = tcp_sk(sk);
1237 struct sock *nsk;
1239 /* Find possible connection requests. */
1240 req = tcp_v6_search_req(tp, &prev, th->source, &skb->nh.ipv6h->saddr,
1241 &skb->nh.ipv6h->daddr, tcp_v6_iif(skb));
1242 if (req)
1243 return tcp_check_req(sk, skb, req, prev);
1245 nsk = __tcp_v6_lookup_established(&skb->nh.ipv6h->saddr,
1246 th->source,
1247 &skb->nh.ipv6h->daddr,
1248 ntohs(th->dest),
1249 tcp_v6_iif(skb));
1251 if (nsk) {
1252 if (nsk->sk_state != TCP_TIME_WAIT) {
1253 bh_lock_sock(nsk);
1254 return nsk;
1256 tcp_tw_put((struct tcp_tw_bucket*)nsk);
1257 return NULL;
1260 #if 0 /*def CONFIG_SYN_COOKIES*/
1261 if (!th->rst && !th->syn && th->ack)
1262 sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
1263 #endif
1264 return sk;
/*
 * Queue a new connection request on the listener's SYN table and
 * start the SYN-ACK retransmit timer.
 */
1267 static void tcp_v6_synq_add(struct sock *sk, struct request_sock *req)
1269 struct tcp_sock *tp = tcp_sk(sk);
1270 struct listen_sock *lopt = tp->accept_queue.listen_opt;
1271 u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
1273 reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT);
1274 tcp_synq_added(sk);
1278 /* FIXME: this is substantially similar to the ipv4 code.
1279 * Can some kind of merge be done? -- erics
1281 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1283 struct tcp6_request_sock *treq;
1284 struct ipv6_pinfo *np = inet6_sk(sk);
1285 struct tcp_options_received tmp_opt;
1286 struct tcp_sock *tp = tcp_sk(sk);
1287 struct request_sock *req = NULL;
1288 __u32 isn = TCP_SKB_CB(skb)->when;
1290 if (skb->protocol == htons(ETH_P_IP))
1291 return tcp_v4_conn_request(sk, skb);
1293 if (!ipv6_unicast_destination(skb))
1294 goto drop;
1297 * There are no SYN attacks on IPv6, yet...
1299 if (tcp_synq_is_full(sk) && !isn) {
1300 if (net_ratelimit())
1301 printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
1302 goto drop;
1305 if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
1306 goto drop;
1308 req = reqsk_alloc(&tcp6_request_sock_ops);
1309 if (req == NULL)
1310 goto drop;
1312 tcp_clear_options(&tmp_opt);
1313 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1314 tmp_opt.user_mss = tp->rx_opt.user_mss;
1316 tcp_parse_options(skb, &tmp_opt, 0);
1318 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1319 tcp_openreq_init(req, &tmp_opt, skb);
1321 treq = tcp6_rsk(req);
1322 ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
1323 ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
1324 TCP_ECN_create_request(req, skb->h.th);
1325 treq->pktopts = NULL;
1326 if (ipv6_opt_accepted(sk, skb) ||
1327 np->rxopt.bits.rxinfo ||
1328 np->rxopt.bits.rxhlim) {
1329 atomic_inc(&skb->users);
1330 treq->pktopts = skb;
1332 treq->iif = sk->sk_bound_dev_if;
1334 /* So that link locals have meaning */
1335 if (!sk->sk_bound_dev_if &&
1336 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1337 treq->iif = tcp_v6_iif(skb);
1339 if (isn == 0)
1340 isn = tcp_v6_init_sequence(sk,skb);
1342 tcp_rsk(req)->snt_isn = isn;
1344 if (tcp_v6_send_synack(sk, req, NULL))
1345 goto drop;
1347 tcp_v6_synq_add(sk, req);
1349 return 0;
1351 drop:
1352 if (req)
1353 reqsk_free(req);
1355 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1356 return 0; /* don't send reset */
1359 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1360 struct request_sock *req,
1361 struct dst_entry *dst)
1363 struct tcp6_request_sock *treq = tcp6_rsk(req);
1364 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1365 struct tcp6_sock *newtcp6sk;
1366 struct inet_sock *newinet;
1367 struct tcp_sock *newtp;
1368 struct sock *newsk;
1369 struct ipv6_txoptions *opt;
1371 if (skb->protocol == htons(ETH_P_IP)) {
1373 * v6 mapped
1376 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1378 if (newsk == NULL)
1379 return NULL;
1381 newtcp6sk = (struct tcp6_sock *)newsk;
1382 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1384 newinet = inet_sk(newsk);
1385 newnp = inet6_sk(newsk);
1386 newtp = tcp_sk(newsk);
1388 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1390 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
1391 newinet->daddr);
1393 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
1394 newinet->saddr);
1396 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1398 newtp->af_specific = &ipv6_mapped;
1399 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1400 newnp->pktoptions = NULL;
1401 newnp->opt = NULL;
1402 newnp->mcast_oif = tcp_v6_iif(skb);
1403 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
1405 /* Charge newly allocated IPv6 socket. Though it is mapped,
1406 * it is IPv6 yet.
1408 #ifdef INET_REFCNT_DEBUG
1409 atomic_inc(&inet6_sock_nr);
1410 #endif
1412 /* It is tricky place. Until this moment IPv4 tcp
1413 worked with IPv6 af_tcp.af_specific.
1414 Sync it now.
1416 tcp_sync_mss(newsk, newtp->pmtu_cookie);
1418 return newsk;
1421 opt = np->opt;
1423 if (sk_acceptq_is_full(sk))
1424 goto out_overflow;
1426 if (np->rxopt.bits.srcrt == 2 &&
1427 opt == NULL && treq->pktopts) {
1428 struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
1429 if (rxopt->srcrt)
1430 opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
1433 if (dst == NULL) {
1434 struct in6_addr *final_p = NULL, final;
1435 struct flowi fl;
1437 memset(&fl, 0, sizeof(fl));
1438 fl.proto = IPPROTO_TCP;
1439 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1440 if (opt && opt->srcrt) {
1441 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1442 ipv6_addr_copy(&final, &fl.fl6_dst);
1443 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1444 final_p = &final;
1446 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1447 fl.oif = sk->sk_bound_dev_if;
1448 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1449 fl.fl_ip_sport = inet_sk(sk)->sport;
1451 if (ip6_dst_lookup(sk, &dst, &fl))
1452 goto out;
1454 if (final_p)
1455 ipv6_addr_copy(&fl.fl6_dst, final_p);
1457 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1458 goto out;
1461 newsk = tcp_create_openreq_child(sk, req, skb);
1462 if (newsk == NULL)
1463 goto out;
1465 /* Charge newly allocated IPv6 socket */
1466 #ifdef INET_REFCNT_DEBUG
1467 atomic_inc(&inet6_sock_nr);
1468 #endif
1470 ip6_dst_store(newsk, dst, NULL);
1471 newsk->sk_route_caps = dst->dev->features &
1472 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
1474 newtcp6sk = (struct tcp6_sock *)newsk;
1475 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1477 newtp = tcp_sk(newsk);
1478 newinet = inet_sk(newsk);
1479 newnp = inet6_sk(newsk);
1481 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1483 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1484 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1485 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1486 newsk->sk_bound_dev_if = treq->iif;
1488 /* Now IPv6 options...
1490 First: no IPv4 options.
1492 newinet->opt = NULL;
1494 /* Clone RX bits */
1495 newnp->rxopt.all = np->rxopt.all;
1497 /* Clone pktoptions received with SYN */
1498 newnp->pktoptions = NULL;
1499 if (treq->pktopts != NULL) {
1500 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1501 kfree_skb(treq->pktopts);
1502 treq->pktopts = NULL;
1503 if (newnp->pktoptions)
1504 skb_set_owner_r(newnp->pktoptions, newsk);
1506 newnp->opt = NULL;
1507 newnp->mcast_oif = tcp_v6_iif(skb);
1508 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
1510 /* Clone native IPv6 options from listening socket (if any)
1512 Yes, keeping reference count would be much more clever,
1513 but we make one more one thing there: reattach optmem
1514 to newsk.
1516 if (opt) {
1517 newnp->opt = ipv6_dup_options(newsk, opt);
1518 if (opt != np->opt)
1519 sock_kfree_s(sk, opt, opt->tot_len);
1522 newtp->ext_header_len = 0;
1523 if (newnp->opt)
1524 newtp->ext_header_len = newnp->opt->opt_nflen +
1525 newnp->opt->opt_flen;
1527 tcp_sync_mss(newsk, dst_mtu(dst));
1528 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1529 tcp_initialize_rcv_mss(newsk);
1531 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1533 __tcp_v6_hash(newsk);
1534 tcp_inherit_port(sk, newsk);
1536 return newsk;
1538 out_overflow:
1539 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1540 out:
1541 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1542 if (opt && opt != np->opt)
1543 sock_kfree_s(sk, opt, opt->tot_len);
1544 dst_release(dst);
1545 return NULL;
1548 static int tcp_v6_checksum_init(struct sk_buff *skb)
1550 if (skb->ip_summed == CHECKSUM_HW) {
1551 skb->ip_summed = CHECKSUM_UNNECESSARY;
1552 if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1553 &skb->nh.ipv6h->daddr,skb->csum))
1554 return 0;
1555 LIMIT_NETDEBUG(printk(KERN_DEBUG "hw tcp v6 csum failed\n"));
1557 if (skb->len <= 76) {
1558 if (tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1559 &skb->nh.ipv6h->daddr,skb_checksum(skb, 0, skb->len, 0)))
1560 return -1;
1561 skb->ip_summed = CHECKSUM_UNNECESSARY;
1562 } else {
1563 skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1564 &skb->nh.ipv6h->daddr,0);
1566 return 0;
1569 /* The socket must have it's spinlock held when we get
1570 * here.
1572 * We have a potential double-lock case here, so even when
1573 * doing backlog processing we use the BH locking scheme.
1574 * This is because we cannot sleep with the original spinlock
1575 * held.
1577 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1579 struct ipv6_pinfo *np = inet6_sk(sk);
1580 struct tcp_sock *tp;
1581 struct sk_buff *opt_skb = NULL;
1583 /* Imagine: socket is IPv6. IPv4 packet arrives,
1584 goes to IPv4 receive handler and backlogged.
1585 From backlog it always goes here. Kerboom...
1586 Fortunately, tcp_rcv_established and rcv_established
1587 handle them correctly, but it is not case with
1588 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1591 if (skb->protocol == htons(ETH_P_IP))
1592 return tcp_v4_do_rcv(sk, skb);
1594 if (sk_filter(sk, skb, 0))
1595 goto discard;
1598 * socket locking is here for SMP purposes as backlog rcv
1599 * is currently called with bh processing disabled.
1602 /* Do Stevens' IPV6_PKTOPTIONS.
1604 Yes, guys, it is the only place in our code, where we
1605 may make it not affecting IPv4.
1606 The rest of code is protocol independent,
1607 and I do not like idea to uglify IPv4.
1609 Actually, all the idea behind IPV6_PKTOPTIONS
1610 looks not very well thought. For now we latch
1611 options, received in the last packet, enqueued
1612 by tcp. Feel free to propose better solution.
1613 --ANK (980728)
1615 if (np->rxopt.all)
1616 opt_skb = skb_clone(skb, GFP_ATOMIC);
1618 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1619 TCP_CHECK_TIMER(sk);
1620 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1621 goto reset;
1622 TCP_CHECK_TIMER(sk);
1623 if (opt_skb)
1624 goto ipv6_pktoptions;
1625 return 0;
1628 if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
1629 goto csum_err;
1631 if (sk->sk_state == TCP_LISTEN) {
1632 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1633 if (!nsk)
1634 goto discard;
1637 * Queue it on the new socket if the new socket is active,
1638 * otherwise we just shortcircuit this and continue with
1639 * the new socket..
1641 if(nsk != sk) {
1642 if (tcp_child_process(sk, nsk, skb))
1643 goto reset;
1644 if (opt_skb)
1645 __kfree_skb(opt_skb);
1646 return 0;
1650 TCP_CHECK_TIMER(sk);
1651 if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1652 goto reset;
1653 TCP_CHECK_TIMER(sk);
1654 if (opt_skb)
1655 goto ipv6_pktoptions;
1656 return 0;
1658 reset:
1659 tcp_v6_send_reset(skb);
1660 discard:
1661 if (opt_skb)
1662 __kfree_skb(opt_skb);
1663 kfree_skb(skb);
1664 return 0;
1665 csum_err:
1666 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1667 goto discard;
1670 ipv6_pktoptions:
1671 /* Do you ask, what is it?
1673 1. skb was enqueued by tcp.
1674 2. skb is added to tail of read queue, rather than out of order.
1675 3. socket is not in passive state.
1676 4. Finally, it really contains options, which user wants to receive.
1678 tp = tcp_sk(sk);
1679 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1680 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1681 if (np->rxopt.bits.rxinfo)
1682 np->mcast_oif = tcp_v6_iif(opt_skb);
1683 if (np->rxopt.bits.rxhlim)
1684 np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
1685 if (ipv6_opt_accepted(sk, opt_skb)) {
1686 skb_set_owner_r(opt_skb, sk);
1687 opt_skb = xchg(&np->pktoptions, opt_skb);
1688 } else {
1689 __kfree_skb(opt_skb);
1690 opt_skb = xchg(&np->pktoptions, NULL);
1694 if (opt_skb)
1695 kfree_skb(opt_skb);
1696 return 0;
1699 static int tcp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
1701 struct sk_buff *skb = *pskb;
1702 struct tcphdr *th;
1703 struct sock *sk;
1704 int ret;
1706 if (skb->pkt_type != PACKET_HOST)
1707 goto discard_it;
1710 * Count it even if it's bad.
1712 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1714 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1715 goto discard_it;
1717 th = skb->h.th;
1719 if (th->doff < sizeof(struct tcphdr)/4)
1720 goto bad_packet;
1721 if (!pskb_may_pull(skb, th->doff*4))
1722 goto discard_it;
1724 if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1725 tcp_v6_checksum_init(skb) < 0))
1726 goto bad_packet;
1728 th = skb->h.th;
1729 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1730 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1731 skb->len - th->doff*4);
1732 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1733 TCP_SKB_CB(skb)->when = 0;
1734 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
1735 TCP_SKB_CB(skb)->sacked = 0;
1737 sk = __tcp_v6_lookup(&skb->nh.ipv6h->saddr, th->source,
1738 &skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb));
1740 if (!sk)
1741 goto no_tcp_socket;
1743 process:
1744 if (sk->sk_state == TCP_TIME_WAIT)
1745 goto do_time_wait;
1747 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1748 goto discard_and_relse;
1750 if (sk_filter(sk, skb, 0))
1751 goto discard_and_relse;
1753 skb->dev = NULL;
1755 bh_lock_sock(sk);
1756 ret = 0;
1757 if (!sock_owned_by_user(sk)) {
1758 if (!tcp_prequeue(sk, skb))
1759 ret = tcp_v6_do_rcv(sk, skb);
1760 } else
1761 sk_add_backlog(sk, skb);
1762 bh_unlock_sock(sk);
1764 sock_put(sk);
1765 return ret ? -1 : 0;
1767 no_tcp_socket:
1768 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1769 goto discard_it;
1771 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1772 bad_packet:
1773 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1774 } else {
1775 tcp_v6_send_reset(skb);
1778 discard_it:
1781 * Discard frame
1784 kfree_skb(skb);
1785 return 0;
1787 discard_and_relse:
1788 sock_put(sk);
1789 goto discard_it;
1791 do_time_wait:
1792 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1793 tcp_tw_put((struct tcp_tw_bucket *) sk);
1794 goto discard_it;
1797 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1798 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1799 tcp_tw_put((struct tcp_tw_bucket *) sk);
1800 goto discard_it;
1803 switch(tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1804 skb, th, skb->len)) {
1805 case TCP_TW_SYN:
1807 struct sock *sk2;
1809 sk2 = tcp_v6_lookup_listener(&skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb));
1810 if (sk2 != NULL) {
1811 tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1812 tcp_tw_put((struct tcp_tw_bucket *)sk);
1813 sk = sk2;
1814 goto process;
1816 /* Fall through to ACK */
1818 case TCP_TW_ACK:
1819 tcp_v6_timewait_ack(sk, skb);
1820 break;
1821 case TCP_TW_RST:
1822 goto no_tcp_socket;
1823 case TCP_TW_SUCCESS:;
1825 goto discard_it;
1828 static int tcp_v6_rebuild_header(struct sock *sk)
1830 int err;
1831 struct dst_entry *dst;
1832 struct ipv6_pinfo *np = inet6_sk(sk);
1834 dst = __sk_dst_check(sk, np->dst_cookie);
1836 if (dst == NULL) {
1837 struct inet_sock *inet = inet_sk(sk);
1838 struct in6_addr *final_p = NULL, final;
1839 struct flowi fl;
1841 memset(&fl, 0, sizeof(fl));
1842 fl.proto = IPPROTO_TCP;
1843 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
1844 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
1845 fl.fl6_flowlabel = np->flow_label;
1846 fl.oif = sk->sk_bound_dev_if;
1847 fl.fl_ip_dport = inet->dport;
1848 fl.fl_ip_sport = inet->sport;
1850 if (np->opt && np->opt->srcrt) {
1851 struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
1852 ipv6_addr_copy(&final, &fl.fl6_dst);
1853 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1854 final_p = &final;
1857 err = ip6_dst_lookup(sk, &dst, &fl);
1858 if (err) {
1859 sk->sk_route_caps = 0;
1860 return err;
1862 if (final_p)
1863 ipv6_addr_copy(&fl.fl6_dst, final_p);
1865 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
1866 sk->sk_err_soft = -err;
1867 dst_release(dst);
1868 return err;
1871 ip6_dst_store(sk, dst, NULL);
1872 sk->sk_route_caps = dst->dev->features &
1873 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
1876 return 0;
1879 static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok)
1881 struct sock *sk = skb->sk;
1882 struct inet_sock *inet = inet_sk(sk);
1883 struct ipv6_pinfo *np = inet6_sk(sk);
1884 struct flowi fl;
1885 struct dst_entry *dst;
1886 struct in6_addr *final_p = NULL, final;
1888 memset(&fl, 0, sizeof(fl));
1889 fl.proto = IPPROTO_TCP;
1890 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
1891 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
1892 fl.fl6_flowlabel = np->flow_label;
1893 IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
1894 fl.oif = sk->sk_bound_dev_if;
1895 fl.fl_ip_sport = inet->sport;
1896 fl.fl_ip_dport = inet->dport;
1898 if (np->opt && np->opt->srcrt) {
1899 struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
1900 ipv6_addr_copy(&final, &fl.fl6_dst);
1901 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1902 final_p = &final;
1905 dst = __sk_dst_check(sk, np->dst_cookie);
1907 if (dst == NULL) {
1908 int err = ip6_dst_lookup(sk, &dst, &fl);
1910 if (err) {
1911 sk->sk_err_soft = -err;
1912 return err;
1915 if (final_p)
1916 ipv6_addr_copy(&fl.fl6_dst, final_p);
1918 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
1919 sk->sk_route_caps = 0;
1920 dst_release(dst);
1921 return err;
1924 ip6_dst_store(sk, dst, NULL);
1925 sk->sk_route_caps = dst->dev->features &
1926 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
1929 skb->dst = dst_clone(dst);
1931 /* Restore final destination back after routing done */
1932 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
1934 return ip6_xmit(sk, skb, &fl, np->opt, 0);
1937 static void v6_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
1939 struct ipv6_pinfo *np = inet6_sk(sk);
1940 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
1942 sin6->sin6_family = AF_INET6;
1943 ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
1944 sin6->sin6_port = inet_sk(sk)->dport;
1945 /* We do not store received flowlabel for TCP */
1946 sin6->sin6_flowinfo = 0;
1947 sin6->sin6_scope_id = 0;
1948 if (sk->sk_bound_dev_if &&
1949 ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
1950 sin6->sin6_scope_id = sk->sk_bound_dev_if;
1953 static int tcp_v6_remember_stamp(struct sock *sk)
1955 /* Alas, not yet... */
1956 return 0;
1959 static struct tcp_func ipv6_specific = {
1960 .queue_xmit = tcp_v6_xmit,
1961 .send_check = tcp_v6_send_check,
1962 .rebuild_header = tcp_v6_rebuild_header,
1963 .conn_request = tcp_v6_conn_request,
1964 .syn_recv_sock = tcp_v6_syn_recv_sock,
1965 .remember_stamp = tcp_v6_remember_stamp,
1966 .net_header_len = sizeof(struct ipv6hdr),
1968 .setsockopt = ipv6_setsockopt,
1969 .getsockopt = ipv6_getsockopt,
1970 .addr2sockaddr = v6_addr2sockaddr,
1971 .sockaddr_len = sizeof(struct sockaddr_in6)
1975 * TCP over IPv4 via INET6 API
1978 static struct tcp_func ipv6_mapped = {
1979 .queue_xmit = ip_queue_xmit,
1980 .send_check = tcp_v4_send_check,
1981 .rebuild_header = tcp_v4_rebuild_header,
1982 .conn_request = tcp_v6_conn_request,
1983 .syn_recv_sock = tcp_v6_syn_recv_sock,
1984 .remember_stamp = tcp_v4_remember_stamp,
1985 .net_header_len = sizeof(struct iphdr),
1987 .setsockopt = ipv6_setsockopt,
1988 .getsockopt = ipv6_getsockopt,
1989 .addr2sockaddr = v6_addr2sockaddr,
1990 .sockaddr_len = sizeof(struct sockaddr_in6)
1995 /* NOTE: A lot of things set to zero explicitly by call to
1996 * sk_alloc() so need not be done here.
1998 static int tcp_v6_init_sock(struct sock *sk)
2000 struct tcp_sock *tp = tcp_sk(sk);
2002 skb_queue_head_init(&tp->out_of_order_queue);
2003 tcp_init_xmit_timers(sk);
2004 tcp_prequeue_init(tp);
2006 tp->rto = TCP_TIMEOUT_INIT;
2007 tp->mdev = TCP_TIMEOUT_INIT;
2009 /* So many TCP implementations out there (incorrectly) count the
2010 * initial SYN frame in their delayed-ACK and congestion control
2011 * algorithms that we must have the following bandaid to talk
2012 * efficiently to them. -DaveM
2014 tp->snd_cwnd = 2;
2016 /* See draft-stevens-tcpca-spec-01 for discussion of the
2017 * initialization of these values.
2019 tp->snd_ssthresh = 0x7fffffff;
2020 tp->snd_cwnd_clamp = ~0;
2021 tp->mss_cache = 536;
2023 tp->reordering = sysctl_tcp_reordering;
2025 sk->sk_state = TCP_CLOSE;
2027 tp->af_specific = &ipv6_specific;
2028 tp->ca_ops = &tcp_init_congestion_ops;
2029 sk->sk_write_space = sk_stream_write_space;
2030 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2032 sk->sk_sndbuf = sysctl_tcp_wmem[1];
2033 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2035 atomic_inc(&tcp_sockets_allocated);
2037 return 0;
2040 static int tcp_v6_destroy_sock(struct sock *sk)
2042 extern int tcp_v4_destroy_sock(struct sock *sk);
2044 tcp_v4_destroy_sock(sk);
2045 return inet6_destroy_sock(sk);
2048 /* Proc filesystem TCPv6 sock list dumping. */
2049 static void get_openreq6(struct seq_file *seq,
2050 struct sock *sk, struct request_sock *req, int i, int uid)
2052 struct in6_addr *dest, *src;
2053 int ttd = req->expires - jiffies;
2055 if (ttd < 0)
2056 ttd = 0;
2058 src = &tcp6_rsk(req)->loc_addr;
2059 dest = &tcp6_rsk(req)->rmt_addr;
2060 seq_printf(seq,
2061 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2062 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2064 src->s6_addr32[0], src->s6_addr32[1],
2065 src->s6_addr32[2], src->s6_addr32[3],
2066 ntohs(inet_sk(sk)->sport),
2067 dest->s6_addr32[0], dest->s6_addr32[1],
2068 dest->s6_addr32[2], dest->s6_addr32[3],
2069 ntohs(inet_rsk(req)->rmt_port),
2070 TCP_SYN_RECV,
2071 0,0, /* could print option size, but that is af dependent. */
2072 1, /* timers active (only the expire timer) */
2073 jiffies_to_clock_t(ttd),
2074 req->retrans,
2075 uid,
2076 0, /* non standard timer */
2077 0, /* open_requests have no inode */
2078 0, req);
2081 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2083 struct in6_addr *dest, *src;
2084 __u16 destp, srcp;
2085 int timer_active;
2086 unsigned long timer_expires;
2087 struct inet_sock *inet = inet_sk(sp);
2088 struct tcp_sock *tp = tcp_sk(sp);
2089 struct ipv6_pinfo *np = inet6_sk(sp);
2091 dest = &np->daddr;
2092 src = &np->rcv_saddr;
2093 destp = ntohs(inet->dport);
2094 srcp = ntohs(inet->sport);
2095 if (tp->pending == TCP_TIME_RETRANS) {
2096 timer_active = 1;
2097 timer_expires = tp->timeout;
2098 } else if (tp->pending == TCP_TIME_PROBE0) {
2099 timer_active = 4;
2100 timer_expires = tp->timeout;
2101 } else if (timer_pending(&sp->sk_timer)) {
2102 timer_active = 2;
2103 timer_expires = sp->sk_timer.expires;
2104 } else {
2105 timer_active = 0;
2106 timer_expires = jiffies;
2109 seq_printf(seq,
2110 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2111 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
2113 src->s6_addr32[0], src->s6_addr32[1],
2114 src->s6_addr32[2], src->s6_addr32[3], srcp,
2115 dest->s6_addr32[0], dest->s6_addr32[1],
2116 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2117 sp->sk_state,
2118 tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
2119 timer_active,
2120 jiffies_to_clock_t(timer_expires - jiffies),
2121 tp->retransmits,
2122 sock_i_uid(sp),
2123 tp->probes_out,
2124 sock_i_ino(sp),
2125 atomic_read(&sp->sk_refcnt), sp,
2126 tp->rto, tp->ack.ato, (tp->ack.quick<<1)|tp->ack.pingpong,
2127 tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
2131 static void get_timewait6_sock(struct seq_file *seq,
2132 struct tcp_tw_bucket *tw, int i)
2134 struct in6_addr *dest, *src;
2135 __u16 destp, srcp;
2136 int ttd = tw->tw_ttd - jiffies;
2138 if (ttd < 0)
2139 ttd = 0;
2141 dest = &tw->tw_v6_daddr;
2142 src = &tw->tw_v6_rcv_saddr;
2143 destp = ntohs(tw->tw_dport);
2144 srcp = ntohs(tw->tw_sport);
2146 seq_printf(seq,
2147 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2148 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2150 src->s6_addr32[0], src->s6_addr32[1],
2151 src->s6_addr32[2], src->s6_addr32[3], srcp,
2152 dest->s6_addr32[0], dest->s6_addr32[1],
2153 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2154 tw->tw_substate, 0, 0,
2155 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2156 atomic_read(&tw->tw_refcnt), tw);
2159 #ifdef CONFIG_PROC_FS
2160 static int tcp6_seq_show(struct seq_file *seq, void *v)
2162 struct tcp_iter_state *st;
2164 if (v == SEQ_START_TOKEN) {
2165 seq_puts(seq,
2166 " sl "
2167 "local_address "
2168 "remote_address "
2169 "st tx_queue rx_queue tr tm->when retrnsmt"
2170 " uid timeout inode\n");
2171 goto out;
2173 st = seq->private;
2175 switch (st->state) {
2176 case TCP_SEQ_STATE_LISTENING:
2177 case TCP_SEQ_STATE_ESTABLISHED:
2178 get_tcp6_sock(seq, v, st->num);
2179 break;
2180 case TCP_SEQ_STATE_OPENREQ:
2181 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2182 break;
2183 case TCP_SEQ_STATE_TIME_WAIT:
2184 get_timewait6_sock(seq, v, st->num);
2185 break;
2187 out:
2188 return 0;
2191 static struct file_operations tcp6_seq_fops;
2192 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2193 .owner = THIS_MODULE,
2194 .name = "tcp6",
2195 .family = AF_INET6,
2196 .seq_show = tcp6_seq_show,
2197 .seq_fops = &tcp6_seq_fops,
2200 int __init tcp6_proc_init(void)
2202 return tcp_proc_register(&tcp6_seq_afinfo);
2205 void tcp6_proc_exit(void)
2207 tcp_proc_unregister(&tcp6_seq_afinfo);
2209 #endif
2211 struct proto tcpv6_prot = {
2212 .name = "TCPv6",
2213 .owner = THIS_MODULE,
2214 .close = tcp_close,
2215 .connect = tcp_v6_connect,
2216 .disconnect = tcp_disconnect,
2217 .accept = tcp_accept,
2218 .ioctl = tcp_ioctl,
2219 .init = tcp_v6_init_sock,
2220 .destroy = tcp_v6_destroy_sock,
2221 .shutdown = tcp_shutdown,
2222 .setsockopt = tcp_setsockopt,
2223 .getsockopt = tcp_getsockopt,
2224 .sendmsg = tcp_sendmsg,
2225 .recvmsg = tcp_recvmsg,
2226 .backlog_rcv = tcp_v6_do_rcv,
2227 .hash = tcp_v6_hash,
2228 .unhash = tcp_unhash,
2229 .get_port = tcp_v6_get_port,
2230 .enter_memory_pressure = tcp_enter_memory_pressure,
2231 .sockets_allocated = &tcp_sockets_allocated,
2232 .memory_allocated = &tcp_memory_allocated,
2233 .memory_pressure = &tcp_memory_pressure,
2234 .sysctl_mem = sysctl_tcp_mem,
2235 .sysctl_wmem = sysctl_tcp_wmem,
2236 .sysctl_rmem = sysctl_tcp_rmem,
2237 .max_header = MAX_TCP_HEADER,
2238 .obj_size = sizeof(struct tcp6_sock),
2239 .rsk_prot = &tcp6_request_sock_ops,
2242 static struct inet6_protocol tcpv6_protocol = {
2243 .handler = tcp_v6_rcv,
2244 .err_handler = tcp_v6_err,
2245 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2248 extern struct proto_ops inet6_stream_ops;
2250 static struct inet_protosw tcpv6_protosw = {
2251 .type = SOCK_STREAM,
2252 .protocol = IPPROTO_TCP,
2253 .prot = &tcpv6_prot,
2254 .ops = &inet6_stream_ops,
2255 .capability = -1,
2256 .no_check = 0,
2257 .flags = INET_PROTOSW_PERMANENT,
2260 void __init tcpv6_init(void)
2262 /* register inet6 protocol */
2263 if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
2264 printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
2265 inet6_register_protosw(&tcpv6_protosw);