/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
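/* Note (added explanation): secure_tcp_sequence_number() derives the initial
 * sequence number from a keyed hash over the connection 4-tuple plus a clock
 * component, in the spirit of RFC 6528, so ISNs are hard to predict off-path
 * while still advancing monotonically for a given 4-tuple.
 */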
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
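/* Why "+ 65535 + 2" above (added explanation): the new incarnation's first
 * sequence number is placed beyond anything the old TIME_WAIT connection
 * could still have in flight (one maximum window plus a little slack), so
 * even without timestamps a stray old segment cannot land inside the new
 * connection's sequence space.
 */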
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
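/* Illustrative only (not part of this file): a minimal userspace sketch of
 * the call that ends up in tcp_v4_connect() above; descriptor name, port and
 * address are arbitrary examples.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * connect() takes the socket lock and calls tcp_v4_connect(), which routes
 * the flow, picks a source port via inet_hash_connect(), and finally sends
 * the SYN through tcp_connect().
 */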
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
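/* If the socket was owned by user context when the ICMP arrived,
 * tcp_v4_err() only records tp->mtu_info and sets the
 * TCP_MTU_REDUCED_DEFERRED flag; tcp_release_cb() then invokes the function
 * above once the lock is released, which is why it must tolerate being run
 * in either context.
 */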
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
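/* Sketch of the RTO-revert arithmetic above (per draft-zimmermann-tcp-lcd):
 * each earlier backoff doubled the timeout, so icsk_backoff-- followed by
 * inet_csk_rto_backoff() recomputes it with one doubling undone. "remaining"
 * is that reverted RTO minus the time the head-of-line skb has already
 * waited; if nothing remains, we retransmit immediately instead of
 * re-arming the timer.
 */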
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
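/* For CHECKSUM_PARTIAL, only the (inverted) pseudo-header sum is written
 * into th->check; csum_start/csum_offset tell the NIC where to fold in the
 * one's-complement sum over the TCP header and payload. The software
 * fallback branch computes the full checksum itself via csum_partial().
 */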
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
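/* Sequence numbers in the RST above follow RFC 793: if the offending
 * segment carried an ACK, the reset reuses that ack number as its own
 * sequence number and needs no ACK bit; otherwise it sends sequence 0 with
 * the ACK bit set and ack_seq covering exactly the segment's sequence space
 * (SYN and FIN each counting as one).
 */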
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct net *net,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sock_net(sk), skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sock_net(sk), skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
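/* Illustrative only (not part of this file): userspace reaches
 * tcp_md5_do_add()/tcp_md5_do_del() through the TCP_MD5SIG socket option,
 * roughly as below; the peer address and key are arbitrary examples.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *a = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	a->sin_family = AF_INET;
 *	inet_pton(AF_INET, "198.51.100.7", &a->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that address (see
 * tcp_v4_parse_md5_keys() below).
 */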
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
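/* Per RFC 2385, the digest built here covers: the IPv4 pseudo-header
 * (saddr, daddr, zero pad, protocol, segment length), then the TCP header
 * with its checksum field zeroed, and - in the callers below - the segment
 * data followed by the key itself.
 */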
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif
/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->opt = tcp_v4_save_options(skb);
}
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req)
		tcp_move_syn(newtp, req);

	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
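/* Early demux runs from the IP receive path before the full routing
 * decision: by finding the established socket here we can reuse its cached
 * rx dst and skip a route lookup for the common case of locally terminated
 * flows.
 */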
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force_safe(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
	    tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));
		__NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
				skb_queue_len(&tp->ucopy.prequeue));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb1);

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;

	/* Only socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Few sockets backlog are possibly concurrently non empty.
	 */
	limit += 64*1024;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
	 */
	if (!skb->data_len)
		skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);
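/* The backlog limit above is deliberately generous: the sum of the receive
 * and send buffer sizes plus 64 KB of headroom, since only the socket owner
 * can collapse or prune the queues. Anything beyond that is dropped and
 * accounted as TCPBacklogDrop in /proc/net/netstat.
 */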
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;
	unsigned int eaten = skb->len;
	int err;

	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
	if (!err) {
		eaten -= skb->len;
		TCP_SKB_CB(skb)->end_seq -= eaten;
	}
	return err;
}
EXPORT_SYMBOL(tcp_filter);
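/* If the socket filter trimmed payload bytes off the tail of the skb,
 * end_seq must shrink by the same amount ("eaten") so sequence accounting
 * still matches the data actually kept; the trim cap of th->doff * 4 bytes
 * guarantees the TCP header itself is always preserved.
 */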
int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler wont play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest, &refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
		sock_hold(sk);
		refcounted = true;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			refcounted = false;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_next(sk);
get_sk:
	sk_for_each_from(sk) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family)
			return sk;
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
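/* Illustrative only: a /proc/net/tcp row produced by the show functions
 * above looks roughly like (fields: slot, local and remote address:port in
 * hex, state, queues, timer info, uid, inode):
 *
 *   0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000  0  0 12345 ...
 *
 * Here 0100007F:0016 is 127.0.0.1:22 (addresses printed in host byte order
 * as hex) and state 0A is TCP_LISTEN; the inode number is an arbitrary
 * example.
 */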
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}