/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/inet_hashtables.h>
#include <net/transp_v6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
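/* tcp_tw_reuse permits reusing a TIME-WAIT port for a new outgoing
 * connection when the timestamp state proves it safe (see
 * tcp_twsk_unique() below); tcp_low_latency makes the receive path skip
 * the prequeue and process segments directly (see tcp_v4_rcv()).
 */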
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif
struct inet_hashinfo tcp_hashinfo;
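/* The initial sequence number is derived from the connection 4-tuple via
 * a keyed hash plus a clocked component, so each connection gets a
 * distinct, hard-to-predict sequence space.
 */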
static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache
	   is held not per host, but per port pair, and the TW bucket is used
	   as state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);
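/* Note on the sequence bump in tcp_twsk_unique(): starting the new
 * incarnation at tw_snd_nxt + 65535 + 2 puts its sequence space beyond
 * one maximum unscaled window of the old one, so stray duplicates from
 * the previous connection cannot alias into the new data stream.
 */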
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	__be32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->inet_sport, usin->sin_port, sk, 1);
	if (tmp < 0) {
		if (tmp == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return tmp;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->inet_saddr)
		inet->inet_saddr = rt->rt_src;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);
		/*
		 * VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it
		 * when trying a new connection.
		 */
		if (peer != NULL &&
		    (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP,
				inet->inet_sport, inet->inet_dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember the soft error
	 * for the case that this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
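/* tcp_sync_mss() above folds the reduced path MTU into the cached MSS,
 * and tcp_simple_retransmit() resends the queued data without treating
 * the drop as congestion (no RTO backoff) - that is what makes this the
 * "fast" PMTU discovery path.
 */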
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
					 icsk->icsk_backoff;

		skb = tcp_write_queue_head(sk);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else if (sock_owned_by_user(sk)) {
			/* RTO revert clocked out retransmission,
			 * but socket is locked. Will defer. */
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  HZ/20, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now. */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, for example, if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else { /* Only an error on timeout */
		sk->sk_err_soft = err;
	}
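/* Note on the backoff revert above: a net/host-unreachable ICMP matching
 * the oldest unacknowledged segment suggests the retransmissions were
 * lost to a routing failure rather than congestion, so one step of
 * exponential backoff is undone and the retransmit timer is re-armed
 * (draft-zimmermann-tcp-lcd).
 */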
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}
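/* With CHECKSUM_PARTIAL only the pseudo-header sum is written here;
 * csum_start/csum_offset tell the device where to fold in the payload
 * checksum. Otherwise the full checksum is computed in software.
 */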
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other
 *		side's TCP. So we build the reply based only on the
 *		parameters arriving with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	net = dev_net(skb_dst(skb)->dev);
	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}
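/* An RST answering an ACK echoes the peer's ACK number as its own
 * sequence so the peer will accept it; an RST answering a bare segment
 * instead carries an ACK covering everything received, per the RFC 793
 * reset generation rules.
 */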
/* The code below, sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;

	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
			);

	inet_twsk_put(tw);
}
*sk
, struct sk_buff
*skb
,
739 struct request_sock
*req
)
741 tcp_v4_send_ack(skb
, tcp_rsk(req
)->snt_isn
+ 1,
742 tcp_rsk(req
)->rcv_isn
+ 1, req
->rcv_wnd
,
745 tcp_v4_md5_do_lookup(sk
, ip_hdr(skb
)->daddr
),
746 inet_rsk(req
)->no_srccheck
? IP_REPLY_ARG_NOSRCCHECK
: 0);
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}
static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp);
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
#ifdef CONFIG_SYN_COOKIES
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(tcp_hdr(skb)->dest));
	}
}
#endif
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
}

EXPORT_SYMBOL(tcp_v4_md5_lookup);
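/* The per-socket key table is a flat array scanned linearly; RFC 2385
 * deployments (typically BGP sessions) configure only a handful of
 * peers, so this stays cheap.
 */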
static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}
/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}

EXPORT_SYMBOL(tcp_v4_md5_do_add);
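/* The key array grows by exactly one slot per insertion (alloced4 tracks
 * capacity), trading an occasional copy for minimal footprint on what is
 * expected to be a very small list.
 */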
static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
				 newkey, newkeylen);
}
*sk
, __be32 addr
)
939 struct tcp_sock
*tp
= tcp_sk(sk
);
942 for (i
= 0; i
< tp
->md5sig_info
->entries4
; i
++) {
943 if (tp
->md5sig_info
->keys4
[i
].addr
== addr
) {
945 kfree(tp
->md5sig_info
->keys4
[i
].base
.key
);
946 tp
->md5sig_info
->entries4
--;
948 if (tp
->md5sig_info
->entries4
== 0) {
949 kfree(tp
->md5sig_info
->keys4
);
950 tp
->md5sig_info
->keys4
= NULL
;
951 tp
->md5sig_info
->alloced4
= 0;
952 } else if (tp
->md5sig_info
->entries4
!= i
) {
953 /* Need to do some manipulation */
954 memmove(&tp
->md5sig_info
->keys4
[i
],
955 &tp
->md5sig_info
->keys4
[i
+1],
956 (tp
->md5sig_info
->entries4
- i
) *
957 sizeof(struct tcp4_md5sig_key
));
959 tcp_free_md5sig_pool();
966 EXPORT_SYMBOL(tcp_v4_md5_do_del
);
static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of keys,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4 = 0;
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			struct sock *sk, struct request_sock *req,
			struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and the peer is
	 * evidently a real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
		req->cookie_ts = tmp_opt.tstamp_ok;
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations
			 * proven to be alive.
			 * It means that we continue to communicate
			 * with destinations already remembered
			 * at the moment of the synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
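/* Order of defenses above: a full SYN queue falls back to syncookies
 * when enabled, a full accept backlog drops the SYN outright, and with
 * tw_recycle the peer's cached timestamp can veto a suspicious request
 * before any state is committed.
 */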
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
					  newkey, key->keylen);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	__inet_hash_nolisten(newsk, NULL);
	__inet_inherit_port(sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}
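/* tcp_v4_hnd_req() below resolves a segment arriving on a listener:
 * first against pending open requests, then against an already
 * established child (packets can race socket creation), and finally
 * against a syncookie-carrying ACK.
 */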
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
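/* Short segments (<= 76 bytes) are cheap enough to verify immediately;
 * for larger ones only the pseudo-header sum is seeded above and full
 * verification is deferred until the data is actually consumed.
 */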
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	sock_rps_save_rxhash(sk, skb->rxhash);

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
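/* Three delivery paths above: process directly when the socket is
 * unowned, park the segment on the prequeue for a sleeping reader, or
 * push it onto the backlog when the owner holds the socket; the backlog
 * is drained at release_sock() time.
 */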
/* VJ's idea. Save the last timestamp seen from this destination
 * and hold it at least for the normal timewait interval, to use for
 * duplicate segment detection in subsequent connections before they
 * enter synchronized state.
 */

int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->inet_daddr) {
		peer = inet_getpeer(inet->inet_daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_add		= tcp_v4_md5_add_func,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv4_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_v4_clear_md5_list(sk);
		kfree(tp->md5sig_info);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}

	percpu_counter_dec(&tcp_sockets_allocated);
}

EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
	       list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		st->bucket = 0;
		ilb = &tcp_hashinfo.listening_hash[0];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	void *rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline int empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	for (st->bucket = 0; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
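/* Established and TIME-WAIT sockets share the ehash table: each bucket
 * is walked twice, first its chain of live connections and then its
 * twchain, with st->state recording which half the iterator is in.
 */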
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non empty bucket */
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc = NULL;
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	return 0;
}
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_fops.open		= tcp_seq_open;
	afinfo->seq_fops.read		= seq_read;
	afinfo->seq_fops.llseek		= seq_lseek;
	afinfo->seq_fops.release	= seq_release_net;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     &afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
static void get_openreq4(struct sock *sk, struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket, we might find a
		 * transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}
static void get_timewait4_sock(struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}
#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
EXPORT_SYMBOL(tcp4_gro_receive);

int tcp4_gro_complete(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
EXPORT_SYMBOL(tcp4_gro_complete);
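/* GRO only merges once the checksum question is settled: a verified
 * CHECKSUM_COMPLETE is promoted to CHECKSUM_UNNECESSARY, while anything
 * unverifiable is flushed to the normal receive path unmerged.
 */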
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};
void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}

EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_tcp_low_latency);