/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		See also:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
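
/* Illustration only (not part of the kernel build): the reuse path above is
 * gated by the tcp_tw_reuse sysctl.  A minimal userspace sketch of turning it
 * on, assuming the standard procfs layout:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "w");
 *
 *		if (!f)
 *			return 1;
 *		// allow reuse of TIME-WAIT ports for new outgoing
 *		// connections when the timestamp check above deems it safe
 *		fputs("1\n", f);
 *		return fclose(f) ? 1 : 0;
 *	}
 */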
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
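
/* Illustration only: tcp_v4_connect() is reached from the connect(2) system
 * call on an AF_INET stream socket.  A minimal, self-contained userspace
 * sketch of that path (address and port are arbitrary examples):
 *
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_in dst = { .sin_family = AF_INET,
 *					   .sin_port   = htons(80) };
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return 1;
 *		inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *		// This lands in tcp_v4_connect(): route lookup, source port
 *		// selection, ISN generation, then tcp_connect().
 *		if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
 *			close(fd);
 *			return 1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */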
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
static void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
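
/* Illustration only: the IP_PMTUDISC_DONT check above corresponds to the
 * per-socket IP_MTU_DISCOVER option.  A hedged userspace sketch of forcing
 * full PMTU discovery on a socket, so ICMP_FRAG_NEEDED feeds this path:
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	static int enable_pmtu_discovery(int fd)
 *	{
 *		int val = IP_PMTUDISC_DO;	// set DF, honor PMTU updates
 *
 *		return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
 *				  &val, sizeof(val));
 *	}
 */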
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *req;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	req = tp->fastopen_rsk;
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt) &&
	    (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
		/* For a Fast Open socket, allow seq to be snt_isn. */
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		/* XXX (TFO) - revisit the following logic for TFO */

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	/* XXX (TFO) - if it's a TFO socket and has been accepted, rather
	 * than following the TCP_SYN_RECV case and closing the socket,
	 * we ignore the ICMP error and keep trying like a fully established
	 * socket. Is this the right thing to do?
	 */
	if (req && req->sk == NULL)
		goto out;

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can f.e. if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}
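
/* Illustration only: both branches above produce the standard 16-bit one's
 * complement Internet checksum, just at different times (hardware offload vs
 * software).  A self-contained sketch of the underlying fold, equivalent in
 * spirit to checksumming an arbitrary byte buffer:
 *
 *	static unsigned short csum16(const unsigned char *data, int len)
 *	{
 *		unsigned long sum = 0;
 *		int i;
 *
 *		for (i = 0; i + 1 < len; i += 2)
 *			sum += (data[i] << 8) | data[i + 1];
 *		if (len & 1)			// odd trailing byte
 *			sum += data[len - 1] << 8;
 *		while (sum >> 16)		// fold carries back in
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (unsigned short)~sum;
 *	}
 */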
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping,
			      bool nocache)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp, NULL);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
		if (!tcp_rsk(req)->snt_synack && !err)
			tcp_rsk(req)->snt_synack = tcp_time_stamp;
	}

	return err;
}
static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);

	if (!res)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return res;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
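
/* Illustration only: the CONFIG_SYN_COOKIES branch above is controlled at run
 * time by the tcp_syncookies sysctl.  A minimal userspace sketch, assuming
 * the standard procfs layout:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/net/ipv4/tcp_syncookies", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("1\n", f);   // send cookies when the SYN queue overflows
 *		return fclose(f) ? 1 : 0;
 *	}
 */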
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos, *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	if (!hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
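
/* Illustration only: tcp_v4_parse_md5_keys() is the kernel side of the
 * TCP_MD5SIG socket option (RFC 2385).  A hedged userspace sketch of keying
 * a socket for one peer (peer address and key are arbitrary examples):
 *
 *	#include <arpa/inet.h>
 *	#include <linux/tcp.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static int add_md5_key(int fd, const char *peer, const char *key)
 *	{
 *		struct tcp_md5sig md5;
 *		struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *		memset(&md5, 0, sizeof(md5));
 *		sin->sin_family = AF_INET;
 *		inet_pton(AF_INET, peer, &sin->sin_addr);
 *		md5.tcpm_keylen = strlen(key);	// <= TCP_MD5SIG_MAXKEYLEN
 *		memcpy(md5.tcpm_key, key, md5.tcpm_keylen);
 *		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
 *				  &md5, sizeof(md5));
 *	}
 */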
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
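
/* For reference: the block hashed above mirrors the IPv4 pseudo-header used
 * for the TCP checksum.  Its layout (as struct tcp4_pseudohdr in net/tcp.h
 * of this era; field names assumed from that header) is:
 *
 *	struct tcp4_pseudohdr {
 *		__be32	saddr;		// source IP address
 *		__be32	daddr;		// destination IP address
 *		__u8	pad;		// always zero
 *		__u8	protocol;	// IPPROTO_TCP
 *		__be16	len;		// TCP segment length
 *	};
 */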
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
}

#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif
static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
			       struct request_sock *req,
			       struct tcp_fastopen_cookie *foc,
			       struct tcp_fastopen_cookie *valid_foc)
{
	bool skip_cookie = false;
	struct fastopen_queue *fastopenq;

	if (likely(!fastopen_cookie_present(foc))) {
		/* See include/net/tcp.h for the meaning of these knobs */
		if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
		    ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
			skip_cookie = true; /* no cookie to validate */
		else
			return false;
	}
	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
	/* A FO option is present; bump the counter. */
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
	    fastopenq == NULL || fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;
		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
			spin_unlock(&fastopenq->lock);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			/* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
			foc->len = -1;
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_free(req1);
	}
	if (skip_cookie) {
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	}
	if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
		if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
			tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
			if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
			    memcmp(&foc->val[0], &valid_foc->val[0],
				   TCP_FASTOPEN_COOKIE_SIZE) != 0)
				return false;
			valid_foc->len = -1;
		}
		/* Acknowledge the data received from the peer. */
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	} else if (foc->len == 0) { /* Client requesting a cookie */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
	} else {
		/* Client sent a cookie with wrong size. Treat it
		 * the same as invalid and return a valid one.
		 */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
	}
	return false;
}
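
/* Illustration only: the TFO_SERVER_* knobs checked above combine the global
 * tcp_fastopen sysctl with the per-listener TCP_FASTOPEN option.  A hedged
 * userspace sketch of enabling Fast Open on a listening socket:
 *
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <sys/socket.h>
 *
 *	static int enable_fastopen(int listen_fd)
 *	{
 *		// max pending TFO requests (becomes fastopenq->max_qlen)
 *		int qlen = 16;
 *
 *		return setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN,
 *				  &qlen, sizeof(qlen));
 *	}
 *
 * A client of this era sends data in the SYN with
 * sendto(fd, buf, len, MSG_FASTOPEN, ...) instead of connect() + send().
 */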
static int tcp_v4_conn_req_fastopen(struct sock *sk,
				    struct sk_buff *skb,
				    struct sk_buff *skb_synack,
				    struct request_sock *req,
				    struct request_values *rvp)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct sock *child;
	int err;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL) {
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		kfree_skb(skb_synack);
		return -1;
	}
	err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
				    ireq->rmt_addr, ireq->opt);
	err = net_xmit_eval(err);
	if (!err)
		tcp_rsk(req)->snt_synack = tcp_time_stamp;
	/* XXX (TFO) - is it ok to ignore error and continue? */

	spin_lock(&queue->fastopenq->lock);
	queue->fastopenq->qlen++;
	spin_unlock(&queue->fastopenq->lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	/* Do a hold on the listener sk so that if the listener is being
	 * closed, the child that has been accepted can live on and still
	 * access listen_lock.
	 */
	sock_hold(sk);
	tcp_rsk(req)->listener = sk;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the SYN table of the parent
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	/* Add the child socket directly into the accept queue */
	inet_csk_reqsk_queue_add(sk, req, child);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_buffer_space(child);
	tcp_init_metrics(child);

	/* Queue the data carried in the SYN packet. We need to first
	 * bump skb's refcnt because the caller will attempt to free it.
	 *
	 * XXX (TFO) - we honor a zero-payload TFO request for now.
	 * (Any reason not to?)
	 */
	if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
		/* Don't queue the skb if there is no payload in SYN.
		 * XXX (TFO) - How about SYN+FIN?
		 */
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	} else {
		skb = skb_get(skb);
		skb_dst_drop(skb);
		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
		skb_set_owner_r(skb, child);
		__skb_queue_tail(&child->sk_receive_queue, skb);
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		tp->syn_data_acked = 1;
	}
	sk->sk_data_ready(sk, 0);
	bh_unlock_sock(child);
	sock_put(child);
	WARN_ON(req->sk == NULL);
	return 0;
}
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	bool want_cookie = false;
	struct flowi4 fl4;
	struct tcp_fastopen_cookie foc = { .len = -1 };
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sk_buff *skb_synack;
	int do_fastopen;

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
		goto drop;
	}

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
			  want_cookie ? NULL : &foc);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (dst == NULL) {
		dst = inet_csk_route_req(sk, &fl4, req);
		if (dst == NULL)
			goto drop_and_free;
	}
	do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);

	/* We don't call tcp_v4_send_synack() directly because we need
	 * to make sure a child socket can be created successfully before
	 * sending back synack!
	 *
	 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
	 * (or better yet, call tcp_send_synack() in the child context
	 * directly, but will have to fix bunch of other code first)
	 * after syn_recv_sock() except one will need to first fix the
	 * latter to remove its dependency on the current implementation
	 * of tcp_v4_send_synack()->tcp_select_initial_window().
	 */
	skb_synack = tcp_make_synack(sk, dst, req,
				     (struct request_values *)&tmp_ext,
				     fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);

	if (skb_synack) {
		__tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
	} else
		goto drop_and_free;

	if (likely(!do_fastopen)) {
		int err;
		err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
					    ireq->rmt_addr, ireq->opt);
		err = net_xmit_eval(err);
		if (err || want_cookie)
			goto drop_and_free;

		tcp_rsk(req)->snt_synack = tcp_time_stamp;
		tcp_rsk(req)->listener = NULL;
		/* Add the request_sock to the SYN table */
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		if (fastopen_cookie_present(&foc) && foc.len != 0)
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
					    (struct request_values *)&tmp_ext))
		goto drop_and_free;

	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	tcp_synack_rtt_meas(newsk, req);
	newtp->total_retrans = req->num_retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}
	BUG_ON(tp->fastopen_rsk != NULL);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
/*
 * Get the next listener socket following cur.  If cur is NULL, get the first
 * socket starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

/*
 * Get the first established socket starting from the bucket given in
 * st->bucket. If st->bucket is zero, the very first socket in the hash
 * is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for the next non-empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
		/* Fallthrough: the listening bucket is still locked. */
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			  sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);

static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->retrans,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
		len);
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	long delta = tw->tw_ttd - jiffies;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
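
/*
 * Illustrative userspace sketch (not part of the kernel build, hence the
 * #if 0 guard): a minimal reader for the /proc/net/tcp records emitted by
 * tcp4_seq_show() above. The field layout follows the header row printed
 * for SEQ_START_TOKEN; addresses are %08X dumps of network-byte-order
 * words, so the plain assignment to s_addr below assumes a little-endian
 * host. The program name and output format are made up for this example.
 */
#if 0
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	FILE *fp = fopen("/proc/net/tcp", "r");
	char line[512];
	unsigned int laddr, lport, raddr, rport, state;

	if (!fp) {
		perror("fopen");
		return 1;
	}
	fgets(line, sizeof(line), fp);		/* skip the header row */
	while (fgets(line, sizeof(line), fp)) {
		/* "  sl  local_address rem_address   st ..." */
		if (sscanf(line, "%*d: %8X:%4X %8X:%4X %2X",
			   &laddr, &lport, &raddr, &rport, &state) != 5)
			continue;
		struct in_addr local = { .s_addr = laddr };
		printf("%s:%u state=%02X\n", inet_ntoa(local), lport, state);
	}
	fclose(fp);
	return 0;
}
#endif
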
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;
	__sum16 sum;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
flush:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;

	case CHECKSUM_NONE:
		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					  skb_gro_len(skb), IPPROTO_TCP, 0);
		sum = csum_fold(skb_checksum(skb,
					     skb_gro_offset(skb),
					     skb_gro_len(skb),
					     wsum));
		if (sum)
			goto flush;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}

	return tcp_gro_receive(head, skb);
}

int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
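
/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * ones-complement arithmetic behind csum_tcpudp_nofold() and csum_fold()
 * as used by tcp4_gro_receive()/tcp4_gro_complete() above. The real
 * helpers live in per-architecture <asm/checksum.h>; this portable
 * version only demonstrates the folding, with hypothetical
 * host-byte-order inputs.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Sum the IPv4 pseudo-header fields, as csum_tcpudp_nofold() does. */
static uint32_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint16_t len, uint8_t proto)
{
	uint64_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;
	sum += len;
	/* wrap the carries back into the low 32 bits */
	return (uint32_t)((sum & 0xffffffff) + (sum >> 32));
}

/* Fold a 32-bit partial sum into the final 16-bit checksum (csum_fold()). */
static uint16_t fold16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* 192.0.2.1 -> 192.0.2.2, 20-byte TCP segment (made-up values) */
	uint32_t sum = pseudo_hdr_sum(0xc0000201, 0xc0000202, 20, 6);

	printf("folded pseudo-header checksum: 0x%04x\n", fold16(sum));
	return 0;
}
#endif
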
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v4_mtu_reduced,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);

static int __net_init tcp_sk_init(struct net *net)
{
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}