/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *	See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
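/* A note on the "+ 65535 + 2" arithmetic above: the new incarnation's
 * write_seq is presumably placed past anything the old TIME-WAIT peer can
 * have seen -- one maximum-sized window (65535) plus 2 for the FIN and SYN
 * sequence numbers -- so fresh data cannot collide with stale segments.
 */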
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
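/* For reference, the path above is what a plain userspace connect(2) on a
 * blocking SOCK_STREAM socket ends up driving.  An illustrative sketch
 * (port and address chosen arbitrarily; not part of this file's build):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * connect() reaches tcp_v4_connect() via inet_stream_connect(), which then
 * sleeps until the handshake started by tcp_connect() completes.
 */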
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
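/* Worked example for the path above (numbers illustrative): if the cached
 * path MTU (icsk_pmtu_cookie) is 1500 and an ICMP_FRAG_NEEDED arrives
 * advertising mtu = 1400, tcp_sync_mss() shrinks the effective MSS
 * accordingly and tcp_simple_retransmit() immediately resends the queued
 * segments now known to have been dropped, instead of waiting for an RTO.
 */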
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
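/* The backoff-revert arithmetic above, with illustrative numbers: with a
 * base RTO of 200ms that had been backed off three times (icsk_backoff = 3,
 * so the running timer used 200ms << 3 = 1600ms), one NET/HOST-unreachable
 * ICMP matching snd_una drops icsk_backoff to 2 and rearms the timer at
 * 200ms << 2 = 800ms minus the time already elapsed -- or retransmits
 * immediately if that budget is spent.  inet_csk_rto_backoff() computes
 * min(icsk_rto << icsk_backoff, TCP_RTO_MAX).
 */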
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
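/* With CHECKSUM_PARTIAL above, software only folds the pseudo-header sum
 * into th->check; csum_start/csum_offset tell the NIC (or the software
 * fallback) where to finish the one's-complement sum over the TCP header
 * and payload.  The other branch computes the complete checksum with
 * csum_partial() right away.
 */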
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *	So that we build reply only basing on parameters
 *	arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
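/* On the sequence numbers chosen above, following RFC 793 reset rules: if
 * the offending segment had ACK set, the RST carries seq = its ack_seq and
 * no ACK of its own; otherwise the RST ACKs exactly what was received,
 * e.g. a bare SYN with seq = S yields ack_seq = S + 1, the +1 coming from
 * the th->syn term in the computation above.
 */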
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct net *net,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sock_net(sk), skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	tcp_v4_send_ack(sock_net(sk), skb, seq,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
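/* Userspace view of the parser above (illustrative sketch, not part of this
 * file's build): RFC 2385 keys are installed per peer address with the
 * TCP_MD5SIG socket option before connect()/listen().
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key, mirroring the tcp_md5_do_del()
 * branch above.
 */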
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
	return crypto_ahash_update(hp->md5_req);
}
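/* For reference, RFC 2385 defines the MD5 digest input as, in order: the
 * pseudo-header assembled above, the TCP header with its checksum zeroed,
 * the segment data, and finally the key itself.  tcp_v4_md5_hash_skb()
 * below follows that sequence in full; tcp_v4_md5_hash_hdr() is the
 * data-less variant used for replies built outside socket context.
 */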
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);

		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "");
		return true;
	}
	return false;
#endif
	return false;
}
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req)
		tcp_move_syn(newtp, req);

	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
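/* Early demux trades one extra lookup in the established hash at the IP
 * layer for the ability to attach the socket (and, when still valid, its
 * cached input route) to the skb before ip_local_deliver(), letting the
 * main receive path skip both the socket lookup and the route lookup for
 * steady-state traffic.
 */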
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force_safe(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
	    tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));
		__NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
				skb_queue_len(&tp->ucopy.prequeue));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb1);

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
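/* A note on the (3 * tcp_rto_min(sk)) / 4 timer above: queueing a segment
 * on the prequeue defers all protocol processing -- including sending any
 * ACK -- until the reader task runs.  Arming a shortened delayed-ACK timer
 * when the first segment is queued presumably bounds how long that segment
 * can sit unacknowledged if the reader stalls, keeping us safely below the
 * peer's retransmission timeout.
 */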
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler wont play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest, &refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
		sock_hold(sk);
		refcounted = true;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			refcounted = false;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);

	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct inet_connection_sock *icsk;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_next(sk);
get_sk:
	sk_for_each_from(sk) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family)
			return sk;
		icsk = inet_csk(sk);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			  sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
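/* An illustrative /proc/net/tcp row as emitted by the helpers above.
 * Addresses are raw hex of the network-byte-order value, so 127.0.0.1:22
 * appears as 0100007F:0016 on a little-endian host; state 0A is
 * TCP_LISTEN.  (Values below are made up for illustration.)
 *
 *  0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000  0 0 16523 1 ffff88003d3af800 100 0 0 10 0
 */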
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}
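/* Each field initialized above is the per-namespace backing store for a
 * /proc/sys/net/ipv4/ knob, e.g.:
 *
 *	# sysctl net.ipv4.tcp_syn_retries
 *	net.ipv4.tcp_syn_retries = 6
 *
 * so every network namespace gets independent defaults (TCP_SYN_RETRIES
 * is 6).
 */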
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}