/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
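
/* Note: tcp_hashinfo above holds the established, listening and bind hash
 * tables used to match every incoming segment to a socket; the IPv6 TCP
 * code shares these same tables.
 */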
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}
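
/* Both helpers above key their result on the connection 4-tuple through the
 * keyed hashes in net/core/secure_seq.c, so each flow starts with an initial
 * sequence number and timestamp offset that an off-path attacker cannot
 * predict.
 */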
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct inet_timewait_sock *tw = inet_twsk(sktw);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);
	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

	if (reuse == 2) {
		/* Still does not detect *everything* that goes through
		 * lo, since we require a loopback src or dst address
		 * or direct binding to 'lo' interface.
		 */
		bool loopback = false;

		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
			loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
			    (ipv6_addr_v4mapped(&tw->tw_v6_daddr) &&
			     (tw->tw_v6_daddr.s6_addr[12] == 127)) ||
			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
			    (ipv6_addr_v4mapped(&tw->tw_v6_rcv_saddr) &&
			     (tw->tw_v6_rcv_saddr.s6_addr[12] == 127)))
				loopback = true;
		} else
#endif
		{
			if (ipv4_is_loopback(tw->tw_daddr) ||
			    ipv4_is_loopback(tw->tw_rcv_saddr))
				loopback = true;
		}
		if (!loopback)
			reuse = 0;
	}

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (reuse && time_after32(ktime_get_seconds(),
					    tcptw->tw_ts_recent_stamp)))) {
		/* In case of repair and re-using TIME-WAIT sockets we still
		 * want to be sure that it is safe as above but honor the
		 * sequence numbers and time stamps set as part of the repair
		 * process.
		 *
		 * Without this check re-using a TIME-WAIT socket with TCP
		 * repair would accumulate a -1 on the repair assigned
		 * sequence number. The first time it is reused the sequence
		 * is -1, the second time -2, etc. This fixes that issue
		 * without appearing to create any others.
		 */
		if (likely(!tp->repair)) {
			tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
			if (tp->write_seq == 0)
				tp->write_seq = 1;
			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		}
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
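
/* Returning 1 above lets connect() reuse a 4-tuple still held by a
 * TIME-WAIT socket; write_seq is placed beyond the old send window
 * (tw_snd_nxt + 65535 + 2) so the new incarnation cannot collide with
 * sequence numbers of the old one.
 */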
static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However, we set the state to SYN-SENT and, without releasing the
	 * socket lock, select a source port, enter ourselves into the hash
	 * tables and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
						       inet->inet_daddr,
						       inet->inet_sport,
						       usin->sin_port);
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = tp->write_seq ^ jiffies;

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
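
/* Illustration (userspace, not part of this file): tcp_v4_connect() is
 * reached from a plain connect() on a TCP socket via inet_stream_connect(),
 * e.g.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 */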
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);
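
/* tcp_req_err() never surfaces an error to userspace: POSIX gives no way
 * to report a network error on a connection that has not been accept()ed
 * yet, so the request is either dropped (@abort) or left to retransmit
 * its SYN-ACK.
 */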
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs send out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_rtx_queue_head(sk);
		BUG_ON(!skb);

		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
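
/* Only the folded pseudo-header sum is stored in th->check above; the
 * device finishes the checksum over the payload using csum_start and
 * csum_offset (CHECKSUM_PARTIAL offload).
 */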
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;
	struct sock *ctl_sk;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb),
					     tcp_v4_sdif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;

	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				     key, ip_hdr(skb)->saddr,
				     ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk) {
		arg.bound_dev_if = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
	if (sk)
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
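
/* The RST built above follows RFC 793: if the offending segment carried
 * an ACK, its ack_seq becomes our sequence number and no ACK is sent;
 * otherwise we send seq 0 with an ACK that covers exactly the data the
 * segment occupied.
 */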
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;
	struct sock *ctl_sk;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
	if (sk)
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
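
/* For a plain SYN_RECV request the sequence is snt_isn + 1 because only
 * the SYN-ACK has consumed sequence space so far; a Fast Open child is a
 * full socket already, so its snd_nxt is used instead.
 */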
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq_opt_deref(ireq));
		err = net_xmit_eval(err);
	}

	return err;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address. */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	const struct tcp_md5sig_info *md5sig;
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;

	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;

		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
			return key;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
		   gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	key->prefixlen = prefixlen;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
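
/* Illustration (userspace, not part of this file): keys reach
 * tcp_md5_do_add() through the TCP_MD5SIG/TCP_MD5SIG_EXT socket options,
 * roughly:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));	// peer sockaddr_in
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * tcp_v4_parse_md5_keys() below shows the exact validation applied.
 */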
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
		   u8 prefixlen)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 prefixlen = 32;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 32)
			return -EINVAL;
	}

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET, prefixlen);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif
/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "");
		return true;
	}
	return false;
#endif
	return false;
}
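
/* A failed MD5 check drops the segment without any reply, as RFC 2385
 * requires: answering with a RST would let unsigned packets tear down a
 * protected connection.
 */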
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = sock_net(sk_listener);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
}
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet_csk_route_req(sk, &fl->u.ip4, req);
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_seq,
	.init_ts_off	=	tcp_v4_init_ts_off,
	.send_synack	=	tcp_v4_send_synack,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = rcu_dereference(ireq->ireq_opt);
	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (likely(*own_req)) {
		tcp_move_syn(newtp, req);
		ireq->ireq_opt = NULL;
	} else {
		newinet->inet_opt = NULL;
	}
	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	newinet->inet_opt = NULL;
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
int tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return 0;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return 0;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return 0;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif, inet_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
	return 0;
}
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;

	/* Only socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Few sockets backlog are possibly concurrently non empty.
	 */
	limit += 64*1024;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
	 */
	skb_condense(skb);

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);
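
/* The limit of rcvbuf + sndbuf (plus headroom) above bounds how much
 * memory unprocessed segments can pin while the socket owner is busy;
 * anything beyond it is dropped and counted as TCPBacklogDrop.
 */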
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;
	unsigned int eaten = skb->len;
	int err;

	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
	if (!err) {
		eaten -= skb->len;
		TCP_SKB_CB(skb)->end_seq -= eaten;
	}
	return err;
}
EXPORT_SYMBOL(tcp_filter);
static void tcp_v4_restore_cb(struct sk_buff *skb)
{
	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
		sizeof(struct inet_skb_parm));
}
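
/* IPCB() and TCP_SKB_CB() overlay the same skb->cb[] storage, so
 * tcp_v4_fill_cb() below must save the IP control block into
 * TCP_SKB_CB(skb)->header.h4 before TCP fields are written, and
 * tcp_v4_restore_cb() above undoes the move when the skb is handed
 * back to a listener.
 */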
static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
			   const struct tcphdr *th)
{
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler won't play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	int sdif = inet_sdif(skb);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest, sdif, &refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			iph = ip_hdr(skb);
			tcp_v4_fill_cb(skb, iph, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v4_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v4_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	tcp_v4_fill_cb(skb, iph, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v4_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb),
							sdif);
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			tcp_v4_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		/* fall through */
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	trace_tcp_destroy_sock(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	tcp_cleanup_ulp(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Check if we want to disable active TFO */
	tcp_fastopen_active_disable_ofo_check(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_fastopen_destroy_cipher(sk);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
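
/* tcp_v4_destroy_sock() is the sk_prot->destroy hook, run when the socket
 * itself is being torn down; at that point the connection is gone, so the
 * work above is purely releasing queues, MD5 keys and fastopen state.
 */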
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket following cur.  If cur is NULL, get the first
 * socket starting from the bucket given in st->bucket; when st->bucket is
 * zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock(&ilb->lock);
		sk = sk_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_next(sk);
get_sk:
	sk_for_each_from(sk) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == afinfo->family)
			return sk;
	}
	spin_unlock(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != afinfo->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == afinfo->family &&
		    net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_start);
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_next);
void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
EXPORT_SYMBOL(tcp_seq_stop);
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
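
/* The row printed above is one line of /proc/net/tcp: slot, hex
 * local/remote address:port, state, tx_queue:rx_queue, timer type and
 * expiry, retransmits, uid, probes, inode, refcount, socket pointer,
 * then RTO, delayed-ACK info, quick-ack/pingpong, cwnd and ssthresh.
 */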
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}
#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}
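
/* Wire the iterator into the seq_file machinery.  start/next/stop are the
 * address-family-independent walkers shared with the IPv6 code; only the
 * show callback, which knows the IPv4 record layout, is specific to
 * /proc/net/tcp.
 */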
static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.family		= AF_INET,
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("tcp", net->proc_net);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
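
/* tcp_prot is the struct proto instance the AF_INET socket layer dispatches
 * through for SOCK_STREAM/IPPROTO_TCP sockets: connect(), sendmsg(),
 * setsockopt() etc. on such a socket all land in one of the handlers below.
 * SLAB_TYPESAFE_BY_RCU lets lockless lookups touch a tcp_sock that may be
 * concurrently freed and reused, which is why lookup code must re-validate
 * the socket after taking a reference.
 */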
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v4_pre_connect,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);
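
/* Per-network-namespace setup and teardown.  Each netns gets its own set of
 * per-CPU kernel control sockets (used to send RSTs and ACKs on behalf of
 * sockets the stack does not own, e.g. from tcp_v4_send_reset()) plus its
 * own copy of every TCP sysctl, so tuning one namespace never leaks into
 * another.
 */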
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	module_put(net->ipv4.tcp_congestion_control->owner);

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}
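
	/* Everything below seeds this namespace's TCP sysctls with their
	 * boot-time defaults; each value remains independently tunable
	 * under /proc/sys/net/ipv4/ within the namespace.
	 */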
	net->ipv4.sysctl_tcp_ecn = 2;	/* accept ECN when the peer requests it,
					 * but don't request it on outgoing
					 * connections.
					 */
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 2;	/* reuse TIME_WAIT only for
						 * loopback connections.
						 */
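
	/* Size the TIME_WAIT bucket limit and the SYN backlog ceiling from
	 * the established-hash size chosen at boot, so small machines get
	 * proportionally smaller limits.
	 */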
	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
	net->ipv4.sysctl_tcp_max_reordering = 300;
	net->ipv4.sysctl_tcp_dsack = 1;
	net->ipv4.sysctl_tcp_app_win = 31;
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
	net->ipv4.sysctl_tcp_frto = 2;
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume. Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
	/* Default TSQ limit of four TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 262144;
	/* rfc5961 challenge ack rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
	net->ipv4.sysctl_tcp_autocorking = 1;
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
	    try_module_get(init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}
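
/* Called once per batch of namespaces being torn down.  Purging the shared
 * timewait hash once for the whole batch is much cheaper than walking it
 * again from every individual namespace's exit path.
 */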
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}