net/ipv4/tcp_ipv4.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol (TCP).
8 * IPv4 specific functions
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
16 * See tcp.c for author information
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
25 * Changes:
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
45 * coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
53 #define pr_fmt(fmt) "TCP: " fmt
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/tcp_memcontrol.h>
77 #include <net/busy_poll.h>
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
92 #ifdef CONFIG_TCP_MD5SIG
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94 __be32 daddr, __be32 saddr, const struct tcphdr *th);
95 #endif
97 struct inet_hashinfo tcp_hashinfo;
98 EXPORT_SYMBOL(tcp_hashinfo);
100 static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
102 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
103 ip_hdr(skb)->saddr,
104 tcp_hdr(skb)->dest,
105 tcp_hdr(skb)->source);
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111 struct tcp_sock *tp = tcp_sk(sk);
113 /* With PAWS, it is safe from the viewpoint
114 of data integrity. Even without PAWS it is safe provided sequence
115 spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.
117 Actually, the idea is close to VJ's, only the timestamp cache is
118 held not per host but per port pair, and the TW bucket is used as
119 the state holder.
121 If the TW bucket has already been destroyed we fall back to VJ's
122 scheme and use the initial timestamp retrieved from the peer table.
124 if (tcptw->tw_ts_recent_stamp &&
125 (twp == NULL || (sysctl_tcp_tw_reuse &&
126 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
127 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128 if (tp->write_seq == 0)
129 tp->write_seq = 1;
130 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
131 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
132 sock_hold(sktw);
133 return 1;
136 return 0;
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
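tcp_twsk_unique() lets a new connection take over a TIME-WAIT four-tuple; the key safety step is starting write_seq at tw_snd_nxt + 65535 + 2, so the new sequence space sits beyond anything the old connection can still have in flight. A minimal userspace sketch of that arithmetic (function name and test values are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* Sketch: pick an initial write_seq for a connection reusing a
 * TIME-WAIT four-tuple. tw_snd_nxt + 65535 + 2 keeps the new
 * sequence space ahead of anything the old connection could have
 * sent (a full 64K window plus FIN/SYN), modulo 2^32 wraparound. */
static uint32_t reuse_write_seq(uint32_t tw_snd_nxt)
{
	uint32_t write_seq = tw_snd_nxt + 65535 + 2;

	/* 0 means "not set" to the caller, so never return it. */
	if (write_seq == 0)
		write_seq = 1;
	return write_seq;
}

int main(void)
{
	printf("%u\n", (unsigned)reuse_write_seq(0xffff0000u)); /* wraps past 2^32 */
	printf("%u\n", (unsigned)reuse_write_seq(0xfffeffffu)); /* would be 0 -> 1 */
	return 0;
}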
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
143 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144 struct inet_sock *inet = inet_sk(sk);
145 struct tcp_sock *tp = tcp_sk(sk);
146 __be16 orig_sport, orig_dport;
147 __be32 daddr, nexthop;
148 struct flowi4 *fl4;
149 struct rtable *rt;
150 int err;
151 struct ip_options_rcu *inet_opt;
153 if (addr_len < sizeof(struct sockaddr_in))
154 return -EINVAL;
156 if (usin->sin_family != AF_INET)
157 return -EAFNOSUPPORT;
159 nexthop = daddr = usin->sin_addr.s_addr;
160 inet_opt = rcu_dereference_protected(inet->inet_opt,
161 sock_owned_by_user(sk));
162 if (inet_opt && inet_opt->opt.srr) {
163 if (!daddr)
164 return -EINVAL;
165 nexthop = inet_opt->opt.faddr;
168 orig_sport = inet->inet_sport;
169 orig_dport = usin->sin_port;
170 fl4 = &inet->cork.fl.u.ip4;
171 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173 IPPROTO_TCP,
174 orig_sport, orig_dport, sk);
175 if (IS_ERR(rt)) {
176 err = PTR_ERR(rt);
177 if (err == -ENETUNREACH)
178 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
179 return err;
182 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
183 ip_rt_put(rt);
184 return -ENETUNREACH;
187 if (!inet_opt || !inet_opt->opt.srr)
188 daddr = fl4->daddr;
190 if (!inet->inet_saddr)
191 inet->inet_saddr = fl4->saddr;
192 inet->inet_rcv_saddr = inet->inet_saddr;
194 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195 /* Reset inherited state */
196 tp->rx_opt.ts_recent = 0;
197 tp->rx_opt.ts_recent_stamp = 0;
198 if (likely(!tp->repair))
199 tp->write_seq = 0;
202 if (tcp_death_row.sysctl_tw_recycle &&
203 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
204 tcp_fetch_timewait_stamp(sk, &rt->dst);
206 inet->inet_dport = usin->sin_port;
207 inet->inet_daddr = daddr;
209 inet_csk(sk)->icsk_ext_hdr_len = 0;
210 if (inet_opt)
211 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
213 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
215 /* Socket identity is still unknown (sport may be zero).
216 * However we set state to SYN-SENT and, without releasing the
217 * socket lock, select a source port, enter ourselves into the
218 * hash tables and complete initialization after this.
220 tcp_set_state(sk, TCP_SYN_SENT);
221 err = inet_hash_connect(&tcp_death_row, sk);
222 if (err)
223 goto failure;
225 inet_set_txhash(sk);
227 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228 inet->inet_sport, inet->inet_dport, sk);
229 if (IS_ERR(rt)) {
230 err = PTR_ERR(rt);
231 rt = NULL;
232 goto failure;
234 /* OK, now commit destination to socket. */
235 sk->sk_gso_type = SKB_GSO_TCPV4;
236 sk_setup_caps(sk, &rt->dst);
238 if (!tp->write_seq && likely(!tp->repair))
239 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240 inet->inet_daddr,
241 inet->inet_sport,
242 usin->sin_port);
244 inet->inet_id = tp->write_seq ^ jiffies;
246 err = tcp_connect(sk);
248 rt = NULL;
249 if (err)
250 goto failure;
252 return 0;
254 failure:
256 * This unhashes the socket and releases the local port,
257 * if necessary.
259 tcp_set_state(sk, TCP_CLOSE);
260 ip_rt_put(rt);
261 sk->sk_route_caps = 0;
262 inet->inet_dport = 0;
263 return err;
265 EXPORT_SYMBOL(tcp_v4_connect);
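For reference, everything tcp_v4_connect() does is driven by a plain connect() from userspace; unless the caller bound a source address and port first, the kernel chooses both here. A self-contained sketch (the destination 192.0.2.1:80 is a documentation-range placeholder):

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;          /* anything else -> EAFNOSUPPORT */
	dst.sin_port = htons(80);          /* placeholder port */
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);  /* TEST-NET-1 */

	/* This is where the kernel runs tcp_v4_connect(): route lookup,
	 * source port selection, hashing, then the SYN. */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");
	close(fd);
	return 0;
}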
268 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269 * It can be called through tcp_release_cb() if socket was owned by user
270 * at the time tcp_v4_err() was called to handle ICMP message.
272 void tcp_v4_mtu_reduced(struct sock *sk)
274 struct dst_entry *dst;
275 struct inet_sock *inet = inet_sk(sk);
276 u32 mtu = tcp_sk(sk)->mtu_info;
278 dst = inet_csk_update_pmtu(sk, mtu);
279 if (!dst)
280 return;
282 /* Something is about to go wrong... Remember the soft error
283 * in case this connection fails to recover.
285 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286 sk->sk_err_soft = EMSGSIZE;
288 mtu = dst_mtu(dst);
290 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291 ip_sk_accept_pmtu(sk) &&
292 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293 tcp_sync_mss(sk, mtu);
295 /* Resend the TCP packet because it's
296 * clear that the old packet has been
297 * dropped. This is the new "fast" path mtu
298 * discovery.
300 tcp_simple_retransmit(sk);
301 } /* else let the usual retransmit timer handle it */
303 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
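Stripped of the routing and socket plumbing, the decision above reduces to: shrink the MSS and fast-retransmit only when PMTU discovery is in effect and the cached path-MTU cookie (icsk_pmtu_cookie) exceeds what the ICMP reported. A small sketch with plain integers (the helper name is invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the tcp_v4_mtu_reduced() decision: returns true when the
 * connection should re-sync its MSS to the new, smaller path MTU and
 * retransmit right away ("fast" PMTU discovery); false means the
 * normal retransmit timer will eventually sort it out. */
static bool should_shrink_mss(bool pmtud_enabled, unsigned int pmtu_cookie,
			      unsigned int icmp_mtu)
{
	return pmtud_enabled && pmtu_cookie > icmp_mtu;
}

int main(void)
{
	/* Host reports a 1400-byte bottleneck while we cached 1500. */
	printf("%d\n", should_shrink_mss(true, 1500, 1400));  /* 1 */
	/* ICMP claims a bigger MTU than we use: nothing to do. */
	printf("%d\n", should_shrink_mss(true, 1460, 9000));  /* 0 */
	return 0;
}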
305 static void do_redirect(struct sk_buff *skb, struct sock *sk)
307 struct dst_entry *dst = __sk_dst_check(sk, 0);
309 if (dst)
310 dst->ops->redirect(dst, sk, skb);
314 * This routine is called by the ICMP module when it gets some
315 * sort of error condition. If err < 0 then the socket should
316 * be closed and the error returned to the user. If err > 0
317 * it's just the icmp type << 8 | icmp code. After adjustment
318 * header points to the first 8 bytes of the tcp header. We need
319 * to find the appropriate port.
321 * The locking strategy used here is very "optimistic". When
322 * someone else accesses the socket the ICMP is just dropped
323 * and for some paths there is no check at all.
324 * A more general error queue to queue errors for later handling
325 * is probably better.
329 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
331 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
332 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
333 struct inet_connection_sock *icsk;
334 struct tcp_sock *tp;
335 struct inet_sock *inet;
336 const int type = icmp_hdr(icmp_skb)->type;
337 const int code = icmp_hdr(icmp_skb)->code;
338 struct sock *sk;
339 struct sk_buff *skb;
340 struct request_sock *fastopen;
341 __u32 seq, snd_una;
342 __u32 remaining;
343 int err;
344 struct net *net = dev_net(icmp_skb->dev);
346 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
347 iph->saddr, th->source, inet_iif(icmp_skb));
348 if (!sk) {
349 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
350 return;
352 if (sk->sk_state == TCP_TIME_WAIT) {
353 inet_twsk_put(inet_twsk(sk));
354 return;
357 bh_lock_sock(sk);
358 /* If too many ICMPs get dropped on busy
359 * servers this needs to be solved differently.
360 * We do take care of PMTU discovery (RFC1191) special case :
361 * we can receive locally generated ICMP messages while socket is held.
363 if (sock_owned_by_user(sk)) {
364 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
365 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
367 if (sk->sk_state == TCP_CLOSE)
368 goto out;
370 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
371 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
372 goto out;
375 icsk = inet_csk(sk);
376 tp = tcp_sk(sk);
377 seq = ntohl(th->seq);
378 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
379 fastopen = tp->fastopen_rsk;
380 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
381 if (sk->sk_state != TCP_LISTEN &&
382 !between(seq, snd_una, tp->snd_nxt)) {
383 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
384 goto out;
387 switch (type) {
388 case ICMP_REDIRECT:
389 do_redirect(icmp_skb, sk);
390 goto out;
391 case ICMP_SOURCE_QUENCH:
392 /* Just silently ignore these. */
393 goto out;
394 case ICMP_PARAMETERPROB:
395 err = EPROTO;
396 break;
397 case ICMP_DEST_UNREACH:
398 if (code > NR_ICMP_UNREACH)
399 goto out;
401 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
402 /* We are not interested in TCP_LISTEN and open_requests
403 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
404 * they should go through unfragmented).
406 if (sk->sk_state == TCP_LISTEN)
407 goto out;
409 tp->mtu_info = info;
410 if (!sock_owned_by_user(sk)) {
411 tcp_v4_mtu_reduced(sk);
412 } else {
413 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
414 sock_hold(sk);
416 goto out;
419 err = icmp_err_convert[code].errno;
420 /* check if icmp_skb allows revert of backoff
421 * (see draft-zimmermann-tcp-lcd) */
422 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
423 break;
424 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
425 !icsk->icsk_backoff || fastopen)
426 break;
428 if (sock_owned_by_user(sk))
429 break;
431 icsk->icsk_backoff--;
432 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
433 TCP_TIMEOUT_INIT;
434 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
436 skb = tcp_write_queue_head(sk);
437 BUG_ON(!skb);
439 remaining = icsk->icsk_rto -
440 min(icsk->icsk_rto,
441 tcp_time_stamp - tcp_skb_timestamp(skb));
443 if (remaining) {
444 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
445 remaining, TCP_RTO_MAX);
446 } else {
447 /* RTO revert clocked out retransmission.
448 * Will retransmit now */
449 tcp_retransmit_timer(sk);
452 break;
453 case ICMP_TIME_EXCEEDED:
454 err = EHOSTUNREACH;
455 break;
456 default:
457 goto out;
460 switch (sk->sk_state) {
461 struct request_sock *req, **prev;
462 case TCP_LISTEN:
463 if (sock_owned_by_user(sk))
464 goto out;
466 req = inet_csk_search_req(sk, &prev, th->dest,
467 iph->daddr, iph->saddr);
468 if (!req)
469 goto out;
471 /* ICMPs are not backlogged, hence we cannot get
472 an established socket here.
474 WARN_ON(req->sk);
476 if (seq != tcp_rsk(req)->snt_isn) {
477 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
478 goto out;
482 * Still in SYN_RECV, just remove it silently.
483 * There is no good way to pass the error to the newly
484 * created socket, and POSIX does not want network
485 * errors returned from accept().
487 inet_csk_reqsk_queue_drop(sk, req, prev);
488 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
489 goto out;
491 case TCP_SYN_SENT:
492 case TCP_SYN_RECV:
493 /* Only in fast or simultaneous open. If a fast open socket
494 * is already accepted it is treated as a connected one below.
496 if (fastopen && fastopen->sk == NULL)
497 break;
499 if (!sock_owned_by_user(sk)) {
500 sk->sk_err = err;
502 sk->sk_error_report(sk);
504 tcp_done(sk);
505 } else {
506 sk->sk_err_soft = err;
508 goto out;
511 /* If we've already connected we will keep trying
512 * until we time out, or the user gives up.
514 * RFC 1122 4.2.3.9 allows us to treat as hard errors
515 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
516 * but it is obsoleted by PMTU discovery).
518 * Note that in the modern internet, where routing is unreliable
519 * and broken firewalls sit in every dark corner sending random
520 * errors ordered by their masters, even these two messages have
521 * lost their original sense (even Linux sends invalid PORT_UNREACHs).
523 * Now we are in compliance with RFCs.
524 * --ANK (980905)
527 inet = inet_sk(sk);
528 if (!sock_owned_by_user(sk) && inet->recverr) {
529 sk->sk_err = err;
530 sk->sk_error_report(sk);
531 } else { /* Only an error on timeout */
532 sk->sk_err_soft = err;
535 out:
536 bh_unlock_sock(sk);
537 sock_put(sk);
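Two pieces of tcp_v4_err() are easiest to see in isolation: the wraparound-safe between(seq, low, high) test that discards out-of-window ICMPs, and the RTO-revert arithmetic that undoes one backoff level when an unreachability error suggests the loss was connectivity disruption rather than congestion. A self-contained sketch (between() follows the usual modular-arithmetic idiom of the kernel's net/tcp.h helper; the revert helper is invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "low <= seq <= high" in 32-bit sequence space. */
static int between(uint32_t seq, uint32_t low, uint32_t high)
{
	return high - low >= seq - low;
}

/* Sketch of the RTO revert: one backoff level is removed, then the
 * timer is re-armed with whatever part of the reverted RTO has not
 * already elapsed since the head-of-queue segment was stamped.
 * backoff must be >= 1 here, as in the kernel path above. */
static uint32_t revert_remaining(uint32_t base_rto, unsigned int backoff,
				 uint32_t elapsed)
{
	uint32_t rto = base_rto << (backoff - 1);   /* backoff-- */

	return rto - (elapsed < rto ? elapsed : rto);
}

int main(void)
{
	printf("%d\n", between(5, 0xfffffff0u, 10));   /* 1: wrapped window */
	printf("%d\n", between(11, 0xfffffff0u, 10));  /* 0: outside */
	printf("%u\n", (unsigned)revert_remaining(200, 3, 500)); /* 800-500 = 300 */
	return 0;
}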
540 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
542 struct tcphdr *th = tcp_hdr(skb);
544 if (skb->ip_summed == CHECKSUM_PARTIAL) {
545 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
546 skb->csum_start = skb_transport_header(skb) - skb->head;
547 skb->csum_offset = offsetof(struct tcphdr, check);
548 } else {
549 th->check = tcp_v4_check(skb->len, saddr, daddr,
550 csum_partial(th,
551 th->doff << 2,
552 skb->csum));
556 /* This routine computes an IPv4 TCP checksum. */
557 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
559 const struct inet_sock *inet = inet_sk(sk);
561 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
563 EXPORT_SYMBOL(tcp_v4_send_check);
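__tcp_v4_send_check() either sets the segment up for checksum offload (CHECKSUM_PARTIAL) or folds the IPv4 pseudo-header and segment through the ones'-complement sum. A userspace sketch of that arithmetic, handy for verifying captured segments (unoptimized, unlike the kernel's csum helpers; the sample header bytes are arbitrary):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Ones'-complement sum over the IPv4 pseudo-header plus TCP segment,
 * mirroring what tcp_v4_check() computes (unoptimized sketch). */
static uint16_t tcp4_checksum(uint32_t saddr, uint32_t daddr,
			      const uint8_t *seg, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	/* Pseudo-header: addresses, zero-padded protocol, TCP length. */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += IPPROTO_TCP;
	sum += (uint32_t)len;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)seg[i] << 8) | seg[i + 1];
	if (len & 1)
		sum += (uint32_t)seg[len - 1] << 8;   /* pad odd byte */

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);   /* fold carries */
	return (uint16_t)~sum;
}

int main(void)
{
	/* 20-byte TCP header with the checksum field zeroed. */
	uint8_t th[20] = { 0x04, 0xd2, 0x00, 0x50 };  /* sport 1234, dport 80 */

	printf("0x%04x\n", tcp4_checksum(0xc0000201, 0xc0000202, th, sizeof(th)));
	return 0;
}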
566 * This routine will send an RST to the other tcp.
568 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
569 * for the reset?
570 * Answer: if a packet caused an RST, it is not for a socket
571 * existing in our system; if it is matched to a socket,
572 * it is just a duplicate segment or a bug in the other side's TCP.
573 * So we build the reply based only on parameters that
574 * arrived with the segment.
575 * Exception: precedence violation. We do not implement it in any case.
578 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
580 const struct tcphdr *th = tcp_hdr(skb);
581 struct {
582 struct tcphdr th;
583 #ifdef CONFIG_TCP_MD5SIG
584 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
585 #endif
586 } rep;
587 struct ip_reply_arg arg;
588 #ifdef CONFIG_TCP_MD5SIG
589 struct tcp_md5sig_key *key;
590 const __u8 *hash_location = NULL;
591 unsigned char newhash[16];
592 int genhash;
593 struct sock *sk1 = NULL;
594 #endif
595 struct net *net;
597 /* Never send a reset in response to a reset. */
598 if (th->rst)
599 return;
601 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
602 return;
604 /* Swap the send and the receive. */
605 memset(&rep, 0, sizeof(rep));
606 rep.th.dest = th->source;
607 rep.th.source = th->dest;
608 rep.th.doff = sizeof(struct tcphdr) / 4;
609 rep.th.rst = 1;
611 if (th->ack) {
612 rep.th.seq = th->ack_seq;
613 } else {
614 rep.th.ack = 1;
615 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
616 skb->len - (th->doff << 2));
619 memset(&arg, 0, sizeof(arg));
620 arg.iov[0].iov_base = (unsigned char *)&rep;
621 arg.iov[0].iov_len = sizeof(rep.th);
623 #ifdef CONFIG_TCP_MD5SIG
624 hash_location = tcp_parse_md5sig_option(th);
625 if (!sk && hash_location) {
627 * active side is lost. Try to find the listening socket through
628 * the source port, and then find the md5 key through the listening
629 * socket. We do not lose any security here:
630 * the incoming packet is checked against the md5 hash of the found
631 * key; no RST is generated if the md5 hash doesn't match.
633 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
634 &tcp_hashinfo, ip_hdr(skb)->saddr,
635 th->source, ip_hdr(skb)->daddr,
636 ntohs(th->source), inet_iif(skb));
637 /* don't send an RST if we can't find the key */
638 if (!sk1)
639 return;
640 rcu_read_lock();
641 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
642 &ip_hdr(skb)->saddr, AF_INET);
643 if (!key)
644 goto release_sk1;
646 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
647 if (genhash || memcmp(hash_location, newhash, 16) != 0)
648 goto release_sk1;
649 } else {
650 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
651 &ip_hdr(skb)->saddr,
652 AF_INET) : NULL;
655 if (key) {
656 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
657 (TCPOPT_NOP << 16) |
658 (TCPOPT_MD5SIG << 8) |
659 TCPOLEN_MD5SIG);
660 /* Update length and the length the header thinks exists */
661 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
662 rep.th.doff = arg.iov[0].iov_len / 4;
664 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
665 key, ip_hdr(skb)->saddr,
666 ip_hdr(skb)->daddr, &rep.th);
668 #endif
669 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
670 ip_hdr(skb)->saddr, /* XXX */
671 arg.iov[0].iov_len, IPPROTO_TCP, 0);
672 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
673 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
674 /* When the socket is gone, all binding information is lost and
675 * routing might fail. No choice here: if we choose to force the
676 * input interface, we will misroute in case of an asymmetric route.
678 if (sk)
679 arg.bound_dev_if = sk->sk_bound_dev_if;
681 net = dev_net(skb_dst(skb)->dev);
682 arg.tos = ip_hdr(skb)->tos;
683 ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
684 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
685 &arg, arg.iov[0].iov_len);
687 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
688 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
690 #ifdef CONFIG_TCP_MD5SIG
691 release_sk1:
692 if (sk1) {
693 rcu_read_unlock();
694 sock_put(sk1);
696 #endif
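The subtle part of tcp_v4_send_reset() is the sequencing that makes the peer accept the RST: echo the offender's ACK as our SEQ if it had one, otherwise send RST|ACK acknowledging everything the segment occupied, counting SYN and FIN as one sequence number each (RFC 793 reset generation). A small sketch of that derivation (struct and names invented for illustration):

#include <stdint.h>
#include <stdio.h>

struct rst_fields {
	uint32_t seq;
	uint32_t ack_seq;
	int ack;          /* ACK flag on the outgoing RST */
};

/* Derive RST sequencing from the incoming segment, as in
 * tcp_v4_send_reset(): purely the RFC 793 reset generation rules. */
static struct rst_fields rst_for(int in_ack, uint32_t in_ack_seq,
				 uint32_t in_seq, int syn, int fin,
				 uint32_t payload_len)
{
	struct rst_fields r = { 0, 0, 0 };

	if (in_ack) {
		r.seq = in_ack_seq;          /* <SEQ=SEG.ACK><CTL=RST> */
	} else {
		r.ack = 1;                   /* RST|ACK covering the segment */
		r.ack_seq = in_seq + syn + fin + payload_len;
	}
	return r;
}

int main(void)
{
	/* A stray SYN (no ACK) to a closed port: RST|ACK of ISN+1. */
	struct rst_fields r = rst_for(0, 0, 1000, 1, 0, 0);

	printf("seq=%u ack=%d ack_seq=%u\n",
	       (unsigned)r.seq, r.ack, (unsigned)r.ack_seq);
	return 0;
}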
699 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
700 outside socket context, is certainly ugly. What can I do?
703 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
704 u32 win, u32 tsval, u32 tsecr, int oif,
705 struct tcp_md5sig_key *key,
706 int reply_flags, u8 tos)
708 const struct tcphdr *th = tcp_hdr(skb);
709 struct {
710 struct tcphdr th;
711 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
712 #ifdef CONFIG_TCP_MD5SIG
713 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
714 #endif
716 } rep;
717 struct ip_reply_arg arg;
718 struct net *net = dev_net(skb_dst(skb)->dev);
720 memset(&rep.th, 0, sizeof(struct tcphdr));
721 memset(&arg, 0, sizeof(arg));
723 arg.iov[0].iov_base = (unsigned char *)&rep;
724 arg.iov[0].iov_len = sizeof(rep.th);
725 if (tsecr) {
726 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
727 (TCPOPT_TIMESTAMP << 8) |
728 TCPOLEN_TIMESTAMP);
729 rep.opt[1] = htonl(tsval);
730 rep.opt[2] = htonl(tsecr);
731 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
734 /* Swap the send and the receive. */
735 rep.th.dest = th->source;
736 rep.th.source = th->dest;
737 rep.th.doff = arg.iov[0].iov_len / 4;
738 rep.th.seq = htonl(seq);
739 rep.th.ack_seq = htonl(ack);
740 rep.th.ack = 1;
741 rep.th.window = htons(win);
743 #ifdef CONFIG_TCP_MD5SIG
744 if (key) {
745 int offset = (tsecr) ? 3 : 0;
747 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
748 (TCPOPT_NOP << 16) |
749 (TCPOPT_MD5SIG << 8) |
750 TCPOLEN_MD5SIG);
751 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
752 rep.th.doff = arg.iov[0].iov_len/4;
754 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
755 key, ip_hdr(skb)->saddr,
756 ip_hdr(skb)->daddr, &rep.th);
758 #endif
759 arg.flags = reply_flags;
760 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
761 ip_hdr(skb)->saddr, /* XXX */
762 arg.iov[0].iov_len, IPPROTO_TCP, 0);
763 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
764 if (oif)
765 arg.bound_dev_if = oif;
766 arg.tos = tos;
767 ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
768 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
769 &arg, arg.iov[0].iov_len);
771 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
774 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
776 struct inet_timewait_sock *tw = inet_twsk(sk);
777 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
779 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
780 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
781 tcp_time_stamp + tcptw->tw_ts_offset,
782 tcptw->tw_ts_recent,
783 tw->tw_bound_dev_if,
784 tcp_twsk_md5_key(tcptw),
785 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
786 tw->tw_tos
789 inet_twsk_put(tw);
792 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
793 struct request_sock *req)
795 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
796 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
798 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
799 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
800 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
801 tcp_time_stamp,
802 req->ts_recent,
804 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
805 AF_INET),
806 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
807 ip_hdr(skb)->tos);
811 * Send a SYN-ACK after having received a SYN.
812 * This still operates on a request_sock only, not on a big
813 * socket.
815 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
816 struct flowi *fl,
817 struct request_sock *req,
818 u16 queue_mapping,
819 struct tcp_fastopen_cookie *foc)
821 const struct inet_request_sock *ireq = inet_rsk(req);
822 struct flowi4 fl4;
823 int err = -1;
824 struct sk_buff *skb;
826 /* First, grab a route. */
827 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
828 return -1;
830 skb = tcp_make_synack(sk, dst, req, foc);
832 if (skb) {
833 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
835 skb_set_queue_mapping(skb, queue_mapping);
836 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
837 ireq->ir_rmt_addr,
838 ireq->opt);
839 err = net_xmit_eval(err);
842 return err;
846 * IPv4 request_sock destructor.
848 static void tcp_v4_reqsk_destructor(struct request_sock *req)
850 kfree(inet_rsk(req)->opt);
854 * Return true if a syncookie should be sent
856 bool tcp_syn_flood_action(struct sock *sk,
857 const struct sk_buff *skb,
858 const char *proto)
860 const char *msg = "Dropping request";
861 bool want_cookie = false;
862 struct listen_sock *lopt;
864 #ifdef CONFIG_SYN_COOKIES
865 if (sysctl_tcp_syncookies) {
866 msg = "Sending cookies";
867 want_cookie = true;
868 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
869 } else
870 #endif
871 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
873 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
874 if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
875 lopt->synflood_warned = 1;
876 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
877 proto, ntohs(tcp_hdr(skb)->dest), msg);
879 return want_cookie;
881 EXPORT_SYMBOL(tcp_syn_flood_action);
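The shape of tcp_syn_flood_action() in miniature: the syncookie sysctl decides between answering with cookies and dropping, and the warning fires once. A toy sketch (the static flag stands in for the per-listener synflood_warned bit kept in listen_opt above):

#include <stdbool.h>
#include <stdio.h>

/* Sketch of tcp_syn_flood_action(): when the SYN queue overflows,
 * either answer with syncookies (if enabled) or drop, and print a
 * one-time warning either way. */
static bool syn_flood_action(bool syncookies_enabled, int port)
{
	static bool warned;
	const char *msg = syncookies_enabled ? "Sending cookies"
					     : "Dropping request";

	if (!warned) {
		warned = true;
		printf("TCP: Possible SYN flooding on port %d. %s.\n",
		       port, msg);
	}
	return syncookies_enabled;   /* true: reply with a cookie */
}

int main(void)
{
	syn_flood_action(true, 80);
	syn_flood_action(true, 80);  /* warning printed only once */
	return 0;
}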
883 #ifdef CONFIG_TCP_MD5SIG
885 * RFC2385 MD5 checksumming requires a mapping of
886 * IP address->MD5 Key.
887 * We need to maintain these in the sk structure.
890 /* Find the Key structure for an address. */
891 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
892 const union tcp_md5_addr *addr,
893 int family)
895 struct tcp_sock *tp = tcp_sk(sk);
896 struct tcp_md5sig_key *key;
897 unsigned int size = sizeof(struct in_addr);
898 struct tcp_md5sig_info *md5sig;
900 /* caller either holds rcu_read_lock() or socket lock */
901 md5sig = rcu_dereference_check(tp->md5sig_info,
902 sock_owned_by_user(sk) ||
903 lockdep_is_held(&sk->sk_lock.slock));
904 if (!md5sig)
905 return NULL;
906 #if IS_ENABLED(CONFIG_IPV6)
907 if (family == AF_INET6)
908 size = sizeof(struct in6_addr);
909 #endif
910 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
911 if (key->family != family)
912 continue;
913 if (!memcmp(&key->addr, addr, size))
914 return key;
916 return NULL;
918 EXPORT_SYMBOL(tcp_md5_do_lookup);
920 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
921 struct sock *addr_sk)
923 union tcp_md5_addr *addr;
925 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
926 return tcp_md5_do_lookup(sk, addr, AF_INET);
928 EXPORT_SYMBOL(tcp_v4_md5_lookup);
930 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
931 struct request_sock *req)
933 union tcp_md5_addr *addr;
935 addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
936 return tcp_md5_do_lookup(sk, addr, AF_INET);
939 /* This can be called on a newly created socket, from other files */
940 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
941 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
943 /* Add Key to the list */
944 struct tcp_md5sig_key *key;
945 struct tcp_sock *tp = tcp_sk(sk);
946 struct tcp_md5sig_info *md5sig;
948 key = tcp_md5_do_lookup(sk, addr, family);
949 if (key) {
950 /* Pre-existing entry - just update that one. */
951 memcpy(key->key, newkey, newkeylen);
952 key->keylen = newkeylen;
953 return 0;
956 md5sig = rcu_dereference_protected(tp->md5sig_info,
957 sock_owned_by_user(sk));
958 if (!md5sig) {
959 md5sig = kmalloc(sizeof(*md5sig), gfp);
960 if (!md5sig)
961 return -ENOMEM;
963 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
964 INIT_HLIST_HEAD(&md5sig->head);
965 rcu_assign_pointer(tp->md5sig_info, md5sig);
968 key = sock_kmalloc(sk, sizeof(*key), gfp);
969 if (!key)
970 return -ENOMEM;
971 if (!tcp_alloc_md5sig_pool()) {
972 sock_kfree_s(sk, key, sizeof(*key));
973 return -ENOMEM;
976 memcpy(key->key, newkey, newkeylen);
977 key->keylen = newkeylen;
978 key->family = family;
979 memcpy(&key->addr, addr,
980 (family == AF_INET6) ? sizeof(struct in6_addr) :
981 sizeof(struct in_addr));
982 hlist_add_head_rcu(&key->node, &md5sig->head);
983 return 0;
985 EXPORT_SYMBOL(tcp_md5_do_add);
987 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
989 struct tcp_md5sig_key *key;
991 key = tcp_md5_do_lookup(sk, addr, family);
992 if (!key)
993 return -ENOENT;
994 hlist_del_rcu(&key->node);
995 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
996 kfree_rcu(key, rcu);
997 return 0;
999 EXPORT_SYMBOL(tcp_md5_do_del);
1001 static void tcp_clear_md5_list(struct sock *sk)
1003 struct tcp_sock *tp = tcp_sk(sk);
1004 struct tcp_md5sig_key *key;
1005 struct hlist_node *n;
1006 struct tcp_md5sig_info *md5sig;
1008 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1010 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1011 hlist_del_rcu(&key->node);
1012 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1013 kfree_rcu(key, rcu);
1017 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1018 int optlen)
1020 struct tcp_md5sig cmd;
1021 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1023 if (optlen < sizeof(cmd))
1024 return -EINVAL;
1026 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1027 return -EFAULT;
1029 if (sin->sin_family != AF_INET)
1030 return -EINVAL;
1032 if (!cmd.tcpm_keylen)
1033 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1034 AF_INET);
1036 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1037 return -EINVAL;
1039 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1040 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1041 GFP_KERNEL);
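Userspace reaches tcp_v4_parse_md5_keys() through the TCP_MD5SIG socket option. A minimal example, assuming a libc that exposes struct tcp_md5sig in <netinet/tcp.h> (peer address and key are placeholders):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>   /* TCP_MD5SIG, struct tcp_md5sig */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct tcp_md5sig md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
	const char *key = "secret";                    /* placeholder key */
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;                     /* peer to protect */
	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
	md5.tcpm_keylen = strlen(key);
	memcpy(md5.tcpm_key, key, md5.tcpm_keylen);

	/* Lands in tcp_v4_parse_md5_keys() -> tcp_md5_do_add(). */
	if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5)) < 0)
		perror("setsockopt(TCP_MD5SIG)");
	close(fd);
	return 0;
}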
1044 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1045 __be32 daddr, __be32 saddr, int nbytes)
1047 struct tcp4_pseudohdr *bp;
1048 struct scatterlist sg;
1050 bp = &hp->md5_blk.ip4;
1053 * 1. the TCP pseudo-header (in the order: source IP address,
1054 * destination IP address, zero-padded protocol number, and
1055 * segment length)
1057 bp->saddr = saddr;
1058 bp->daddr = daddr;
1059 bp->pad = 0;
1060 bp->protocol = IPPROTO_TCP;
1061 bp->len = cpu_to_be16(nbytes);
1063 sg_init_one(&sg, bp, sizeof(*bp));
1064 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1067 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1068 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1070 struct tcp_md5sig_pool *hp;
1071 struct hash_desc *desc;
1073 hp = tcp_get_md5sig_pool();
1074 if (!hp)
1075 goto clear_hash_noput;
1076 desc = &hp->md5_desc;
1078 if (crypto_hash_init(desc))
1079 goto clear_hash;
1080 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1081 goto clear_hash;
1082 if (tcp_md5_hash_header(hp, th))
1083 goto clear_hash;
1084 if (tcp_md5_hash_key(hp, key))
1085 goto clear_hash;
1086 if (crypto_hash_final(desc, md5_hash))
1087 goto clear_hash;
1089 tcp_put_md5sig_pool();
1090 return 0;
1092 clear_hash:
1093 tcp_put_md5sig_pool();
1094 clear_hash_noput:
1095 memset(md5_hash, 0, 16);
1096 return 1;
1099 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1100 const struct sock *sk, const struct request_sock *req,
1101 const struct sk_buff *skb)
1103 struct tcp_md5sig_pool *hp;
1104 struct hash_desc *desc;
1105 const struct tcphdr *th = tcp_hdr(skb);
1106 __be32 saddr, daddr;
1108 if (sk) {
1109 saddr = inet_sk(sk)->inet_saddr;
1110 daddr = inet_sk(sk)->inet_daddr;
1111 } else if (req) {
1112 saddr = inet_rsk(req)->ir_loc_addr;
1113 daddr = inet_rsk(req)->ir_rmt_addr;
1114 } else {
1115 const struct iphdr *iph = ip_hdr(skb);
1116 saddr = iph->saddr;
1117 daddr = iph->daddr;
1120 hp = tcp_get_md5sig_pool();
1121 if (!hp)
1122 goto clear_hash_noput;
1123 desc = &hp->md5_desc;
1125 if (crypto_hash_init(desc))
1126 goto clear_hash;
1128 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1129 goto clear_hash;
1130 if (tcp_md5_hash_header(hp, th))
1131 goto clear_hash;
1132 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1133 goto clear_hash;
1134 if (tcp_md5_hash_key(hp, key))
1135 goto clear_hash;
1136 if (crypto_hash_final(desc, md5_hash))
1137 goto clear_hash;
1139 tcp_put_md5sig_pool();
1140 return 0;
1142 clear_hash:
1143 tcp_put_md5sig_pool();
1144 clear_hash_noput:
1145 memset(md5_hash, 0, 16);
1146 return 1;
1148 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1150 static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
1151 const struct sk_buff *skb)
1154 * This gets called for each TCP segment that arrives
1155 * so we want to be efficient.
1156 * We have 3 drop cases:
1157 * o No MD5 hash and one expected.
1158 * o MD5 hash and we're not expecting one.
1159 * o MD5 hash and it's wrong.
1161 const __u8 *hash_location = NULL;
1162 struct tcp_md5sig_key *hash_expected;
1163 const struct iphdr *iph = ip_hdr(skb);
1164 const struct tcphdr *th = tcp_hdr(skb);
1165 int genhash;
1166 unsigned char newhash[16];
1168 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1169 AF_INET);
1170 hash_location = tcp_parse_md5sig_option(th);
1172 /* We've parsed the options - do we have a hash? */
1173 if (!hash_expected && !hash_location)
1174 return false;
1176 if (hash_expected && !hash_location) {
1177 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1178 return true;
1181 if (!hash_expected && hash_location) {
1182 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1183 return true;
1186 /* Okay, so this is hash_expected and hash_location -
1187 * so we need to calculate the checksum.
1189 genhash = tcp_v4_md5_hash_skb(newhash,
1190 hash_expected,
1191 NULL, NULL, skb);
1193 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1194 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1195 &iph->saddr, ntohs(th->source),
1196 &iph->daddr, ntohs(th->dest),
1197 genhash ? " tcp_v4_calc_md5_hash failed"
1198 : "");
1199 return true;
1201 return false;
1204 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1206 bool ret;
1208 rcu_read_lock();
1209 ret = __tcp_v4_inbound_md5_hash(sk, skb);
1210 rcu_read_unlock();
1212 return ret;
1215 #endif
1217 static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
1218 struct sk_buff *skb)
1220 struct inet_request_sock *ireq = inet_rsk(req);
1222 ireq->ir_loc_addr = ip_hdr(skb)->daddr;
1223 ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
1224 ireq->no_srccheck = inet_sk(sk)->transparent;
1225 ireq->opt = tcp_v4_save_options(skb);
1228 static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
1229 const struct request_sock *req,
1230 bool *strict)
1232 struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1234 if (strict) {
1235 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1236 *strict = true;
1237 else
1238 *strict = false;
1241 return dst;
1244 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1245 .family = PF_INET,
1246 .obj_size = sizeof(struct tcp_request_sock),
1247 .rtx_syn_ack = tcp_rtx_synack,
1248 .send_ack = tcp_v4_reqsk_send_ack,
1249 .destructor = tcp_v4_reqsk_destructor,
1250 .send_reset = tcp_v4_send_reset,
1251 .syn_ack_timeout = tcp_syn_ack_timeout,
1254 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1255 .mss_clamp = TCP_MSS_DEFAULT,
1256 #ifdef CONFIG_TCP_MD5SIG
1257 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1258 .calc_md5_hash = tcp_v4_md5_hash_skb,
1259 #endif
1260 .init_req = tcp_v4_init_req,
1261 #ifdef CONFIG_SYN_COOKIES
1262 .cookie_init_seq = cookie_v4_init_sequence,
1263 #endif
1264 .route_req = tcp_v4_route_req,
1265 .init_seq = tcp_v4_init_sequence,
1266 .send_synack = tcp_v4_send_synack,
1267 .queue_hash_add = inet_csk_reqsk_queue_hash_add,
1270 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1272 /* Never answer SYNs sent to broadcast or multicast */
1273 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1274 goto drop;
1276 return tcp_conn_request(&tcp_request_sock_ops,
1277 &tcp_request_sock_ipv4_ops, sk, skb);
1279 drop:
1280 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1281 return 0;
1283 EXPORT_SYMBOL(tcp_v4_conn_request);
1287 * The three way handshake has completed - we got a valid synack -
1288 * now create the new socket.
1290 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1291 struct request_sock *req,
1292 struct dst_entry *dst)
1294 struct inet_request_sock *ireq;
1295 struct inet_sock *newinet;
1296 struct tcp_sock *newtp;
1297 struct sock *newsk;
1298 #ifdef CONFIG_TCP_MD5SIG
1299 struct tcp_md5sig_key *key;
1300 #endif
1301 struct ip_options_rcu *inet_opt;
1303 if (sk_acceptq_is_full(sk))
1304 goto exit_overflow;
1306 newsk = tcp_create_openreq_child(sk, req, skb);
1307 if (!newsk)
1308 goto exit_nonewsk;
1310 newsk->sk_gso_type = SKB_GSO_TCPV4;
1311 inet_sk_rx_dst_set(newsk, skb);
1313 newtp = tcp_sk(newsk);
1314 newinet = inet_sk(newsk);
1315 ireq = inet_rsk(req);
1316 newinet->inet_daddr = ireq->ir_rmt_addr;
1317 newinet->inet_rcv_saddr = ireq->ir_loc_addr;
1318 newinet->inet_saddr = ireq->ir_loc_addr;
1319 inet_opt = ireq->opt;
1320 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1321 ireq->opt = NULL;
1322 newinet->mc_index = inet_iif(skb);
1323 newinet->mc_ttl = ip_hdr(skb)->ttl;
1324 newinet->rcv_tos = ip_hdr(skb)->tos;
1325 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1326 inet_set_txhash(newsk);
1327 if (inet_opt)
1328 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1329 newinet->inet_id = newtp->write_seq ^ jiffies;
1331 if (!dst) {
1332 dst = inet_csk_route_child_sock(sk, newsk, req);
1333 if (!dst)
1334 goto put_and_exit;
1335 } else {
1336 /* syncookie case : see end of cookie_v4_check() */
1338 sk_setup_caps(newsk, dst);
1340 tcp_sync_mss(newsk, dst_mtu(dst));
1341 newtp->advmss = dst_metric_advmss(dst);
1342 if (tcp_sk(sk)->rx_opt.user_mss &&
1343 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1344 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1346 tcp_initialize_rcv_mss(newsk);
1348 #ifdef CONFIG_TCP_MD5SIG
1349 /* Copy over the MD5 key from the original socket */
1350 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1351 AF_INET);
1352 if (key != NULL) {
1354 * We're using one, so create a matching key
1355 * on the newsk structure. If we fail to get
1356 * memory, then we end up not copying the key
1357 * across. Shucks.
1359 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1360 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1361 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1363 #endif
1365 if (__inet_inherit_port(sk, newsk) < 0)
1366 goto put_and_exit;
1367 __inet_hash_nolisten(newsk, NULL);
1369 return newsk;
1371 exit_overflow:
1372 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1373 exit_nonewsk:
1374 dst_release(dst);
1375 exit:
1376 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1377 return NULL;
1378 put_and_exit:
1379 inet_csk_prepare_forced_close(newsk);
1380 tcp_done(newsk);
1381 goto exit;
1383 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
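One small but consequential step above: the child's advertised MSS is the route's advmss, clamped by any TCP_MAXSEG (user_mss) the application set on the listener. Made explicit (helper name invented for illustration):

#include <stdio.h>

/* Sketch of the advmss selection in tcp_v4_syn_recv_sock(): take the
 * route's advertised MSS, but never exceed a user-set TCP_MAXSEG
 * (user_mss == 0 means "not set"). */
static unsigned int child_advmss(unsigned int dst_advmss,
				 unsigned int user_mss)
{
	if (user_mss && user_mss < dst_advmss)
		return user_mss;
	return dst_advmss;
}

int main(void)
{
	printf("%u\n", child_advmss(1460, 0));     /* 1460: no clamp */
	printf("%u\n", child_advmss(1460, 536));   /* 536: user clamp wins */
	return 0;
}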
1385 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1387 struct tcphdr *th = tcp_hdr(skb);
1388 const struct iphdr *iph = ip_hdr(skb);
1389 struct sock *nsk;
1390 struct request_sock **prev;
1391 /* Find possible connection requests. */
1392 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1393 iph->saddr, iph->daddr);
1394 if (req)
1395 return tcp_check_req(sk, skb, req, prev, false);
1397 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1398 th->source, iph->daddr, th->dest, inet_iif(skb));
1400 if (nsk) {
1401 if (nsk->sk_state != TCP_TIME_WAIT) {
1402 bh_lock_sock(nsk);
1403 return nsk;
1405 inet_twsk_put(inet_twsk(nsk));
1406 return NULL;
1409 #ifdef CONFIG_SYN_COOKIES
1410 if (!th->syn)
1411 sk = cookie_v4_check(sk, skb);
1412 #endif
1413 return sk;
1416 /* The socket must have its spinlock held when we get
1417 * here.
1419 * We have a potential double-lock case here, so even when
1420 * doing backlog processing we use the BH locking scheme.
1421 * This is because we cannot sleep with the original spinlock
1422 * held.
1424 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1426 struct sock *rsk;
1428 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1429 struct dst_entry *dst = sk->sk_rx_dst;
1431 sock_rps_save_rxhash(sk, skb);
1432 sk_mark_napi_id(sk, skb);
1433 if (dst) {
1434 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1435 dst->ops->check(dst, 0) == NULL) {
1436 dst_release(dst);
1437 sk->sk_rx_dst = NULL;
1440 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1441 return 0;
1444 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1445 goto csum_err;
1447 if (sk->sk_state == TCP_LISTEN) {
1448 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1449 if (!nsk)
1450 goto discard;
1452 if (nsk != sk) {
1453 sock_rps_save_rxhash(nsk, skb);
1454 sk_mark_napi_id(sk, skb);
1455 if (tcp_child_process(sk, nsk, skb)) {
1456 rsk = nsk;
1457 goto reset;
1459 return 0;
1461 } else
1462 sock_rps_save_rxhash(sk, skb);
1464 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1465 rsk = sk;
1466 goto reset;
1468 return 0;
1470 reset:
1471 tcp_v4_send_reset(rsk, skb);
1472 discard:
1473 kfree_skb(skb);
1474 /* Be careful here. If this function gets more complicated and
1475 * gcc suffers from register pressure on the x86, sk (in %ebx)
1476 * might be destroyed here. This current version compiles correctly,
1477 * but you have been warned.
1479 return 0;
1481 csum_err:
1482 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1483 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1484 goto discard;
1486 EXPORT_SYMBOL(tcp_v4_do_rcv);
1488 void tcp_v4_early_demux(struct sk_buff *skb)
1490 const struct iphdr *iph;
1491 const struct tcphdr *th;
1492 struct sock *sk;
1494 if (skb->pkt_type != PACKET_HOST)
1495 return;
1497 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1498 return;
1500 iph = ip_hdr(skb);
1501 th = tcp_hdr(skb);
1503 if (th->doff < sizeof(struct tcphdr) / 4)
1504 return;
1506 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1507 iph->saddr, th->source,
1508 iph->daddr, ntohs(th->dest),
1509 skb->skb_iif);
1510 if (sk) {
1511 skb->sk = sk;
1512 skb->destructor = sock_edemux;
1513 if (sk->sk_state != TCP_TIME_WAIT) {
1514 struct dst_entry *dst = sk->sk_rx_dst;
1516 if (dst)
1517 dst = dst_check(dst, 0);
1518 if (dst &&
1519 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1520 skb_dst_set_noref(skb, dst);
1525 /* Packet is added to VJ-style prequeue for processing in process
1526 * context, if a reader task is waiting. Apparently, this exciting
1527 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1528 * failed somewhere. Latency? Burstiness? Well, at least now we will
1529 * see, why it failed. 8)8) --ANK
1532 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1534 struct tcp_sock *tp = tcp_sk(sk);
1536 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1537 return false;
1539 if (skb->len <= tcp_hdrlen(skb) &&
1540 skb_queue_len(&tp->ucopy.prequeue) == 0)
1541 return false;
1543 /* Before escaping RCU protected region, we need to take care of skb
1544 * dst. Prequeue is only enabled for established sockets.
1545 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
1546 * Instead of doing full sk_rx_dst validity here, let's perform
1547 * an optimistic check.
1549 if (likely(sk->sk_rx_dst))
1550 skb_dst_drop(skb);
1551 else
1552 skb_dst_force(skb);
1554 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1555 tp->ucopy.memory += skb->truesize;
1556 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1557 struct sk_buff *skb1;
1559 BUG_ON(sock_owned_by_user(sk));
1561 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1562 sk_backlog_rcv(sk, skb1);
1563 NET_INC_STATS_BH(sock_net(sk),
1564 LINUX_MIB_TCPPREQUEUEDROPPED);
1567 tp->ucopy.memory = 0;
1568 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1569 wake_up_interruptible_sync_poll(sk_sleep(sk),
1570 POLLIN | POLLRDNORM | POLLRDBAND);
1571 if (!inet_csk_ack_scheduled(sk))
1572 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1573 (3 * tcp_rto_min(sk)) / 4,
1574 TCP_RTO_MAX);
1576 return true;
1578 EXPORT_SYMBOL(tcp_prequeue);
1581 * From tcp_input.c
1584 int tcp_v4_rcv(struct sk_buff *skb)
1586 const struct iphdr *iph;
1587 const struct tcphdr *th;
1588 struct sock *sk;
1589 int ret;
1590 struct net *net = dev_net(skb->dev);
1592 if (skb->pkt_type != PACKET_HOST)
1593 goto discard_it;
1595 /* Count it even if it's bad */
1596 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1598 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1599 goto discard_it;
1601 th = tcp_hdr(skb);
1603 if (th->doff < sizeof(struct tcphdr) / 4)
1604 goto bad_packet;
1605 if (!pskb_may_pull(skb, th->doff * 4))
1606 goto discard_it;
1608 /* An explanation is required here, I think.
1609 * Packet length and doff are validated by header prediction,
1610 * provided case of th->doff==0 is eliminated.
1611 * So, we defer the checks. */
1613 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1614 goto csum_error;
1616 th = tcp_hdr(skb);
1617 iph = ip_hdr(skb);
1618 /* This is tricky: we move IPCB to its correct location in TCP_SKB_CB().
1619 * barrier() makes sure the compiler won't play fool^Waliasing games.
1621 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1622 sizeof(struct inet_skb_parm));
1623 barrier();
1625 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1626 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1627 skb->len - th->doff * 4);
1628 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1629 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1630 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1631 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1632 TCP_SKB_CB(skb)->sacked = 0;
1634 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1635 if (!sk)
1636 goto no_tcp_socket;
1638 process:
1639 if (sk->sk_state == TCP_TIME_WAIT)
1640 goto do_time_wait;
1642 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1643 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1644 goto discard_and_relse;
1647 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1648 goto discard_and_relse;
1650 #ifdef CONFIG_TCP_MD5SIG
1652 * We really want to reject the packet as early as possible
1653 * if:
1654 * o We're expecting an MD5'd packet and there is no MD5 tcp option
1655 * o There is an MD5 option and we're not expecting one
1657 if (tcp_v4_inbound_md5_hash(sk, skb))
1658 goto discard_and_relse;
1659 #endif
1661 nf_reset(skb);
1663 if (sk_filter(sk, skb))
1664 goto discard_and_relse;
1666 sk_incoming_cpu_update(sk);
1667 skb->dev = NULL;
1669 bh_lock_sock_nested(sk);
1670 ret = 0;
1671 if (!sock_owned_by_user(sk)) {
1672 if (!tcp_prequeue(sk, skb))
1673 ret = tcp_v4_do_rcv(sk, skb);
1674 } else if (unlikely(sk_add_backlog(sk, skb,
1675 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1676 bh_unlock_sock(sk);
1677 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1678 goto discard_and_relse;
1680 bh_unlock_sock(sk);
1682 sock_put(sk);
1684 return ret;
1686 no_tcp_socket:
1687 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1688 goto discard_it;
1690 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1691 csum_error:
1692 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1693 bad_packet:
1694 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1695 } else {
1696 tcp_v4_send_reset(NULL, skb);
1699 discard_it:
1700 /* Discard frame. */
1701 kfree_skb(skb);
1702 return 0;
1704 discard_and_relse:
1705 sock_put(sk);
1706 goto discard_it;
1708 do_time_wait:
1709 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1710 inet_twsk_put(inet_twsk(sk));
1711 goto discard_it;
1714 if (skb->len < (th->doff << 2)) {
1715 inet_twsk_put(inet_twsk(sk));
1716 goto bad_packet;
1718 if (tcp_checksum_complete(skb)) {
1719 inet_twsk_put(inet_twsk(sk));
1720 goto csum_error;
1722 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1723 case TCP_TW_SYN: {
1724 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1725 &tcp_hashinfo,
1726 iph->saddr, th->source,
1727 iph->daddr, th->dest,
1728 inet_iif(skb));
1729 if (sk2) {
1730 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1731 inet_twsk_put(inet_twsk(sk));
1732 sk = sk2;
1733 goto process;
1735 /* Fall through to ACK */
1737 case TCP_TW_ACK:
1738 tcp_v4_timewait_ack(sk, skb);
1739 break;
1740 case TCP_TW_RST:
1741 goto no_tcp_socket;
1742 case TCP_TW_SUCCESS:;
1744 goto discard_it;
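The header sanity checks at the top of tcp_v4_rcv() are easy to restate over a raw buffer: doff counts 32-bit words, must cover the 20-byte base header, and must itself fit in the data we hold. A standalone sketch (not kernel API):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal mirror of the header sanity checks at the top of
 * tcp_v4_rcv(): doff is in 32-bit words, must cover at least the
 * 20-byte base header, and the buffer must actually contain
 * doff*4 bytes before any option parsing. */
static int tcp_header_ok(const uint8_t *buf, size_t len)
{
	unsigned int doff;

	if (len < 20)                 /* pskb_may_pull(sizeof(tcphdr)) */
		return 0;
	doff = buf[12] >> 4;          /* data offset, upper 4 bits */
	if (doff < 5)                 /* th->doff < sizeof(tcphdr)/4 */
		return 0;
	if (len < doff * 4)           /* pskb_may_pull(th->doff * 4) */
		return 0;
	return 1;
}

int main(void)
{
	uint8_t hdr[20] = { 0 };

	hdr[12] = 5 << 4;                       /* doff = 5, no options */
	printf("%d\n", tcp_header_ok(hdr, sizeof(hdr)));  /* 1 */
	hdr[12] = 8 << 4;                       /* claims options we lack */
	printf("%d\n", tcp_header_ok(hdr, sizeof(hdr)));  /* 0 */
	return 0;
}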
1747 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1748 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1749 .twsk_unique = tcp_twsk_unique,
1750 .twsk_destructor= tcp_twsk_destructor,
1753 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1755 struct dst_entry *dst = skb_dst(skb);
1757 if (dst) {
1758 dst_hold(dst);
1759 sk->sk_rx_dst = dst;
1760 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1763 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1765 const struct inet_connection_sock_af_ops ipv4_specific = {
1766 .queue_xmit = ip_queue_xmit,
1767 .send_check = tcp_v4_send_check,
1768 .rebuild_header = inet_sk_rebuild_header,
1769 .sk_rx_dst_set = inet_sk_rx_dst_set,
1770 .conn_request = tcp_v4_conn_request,
1771 .syn_recv_sock = tcp_v4_syn_recv_sock,
1772 .net_header_len = sizeof(struct iphdr),
1773 .setsockopt = ip_setsockopt,
1774 .getsockopt = ip_getsockopt,
1775 .addr2sockaddr = inet_csk_addr2sockaddr,
1776 .sockaddr_len = sizeof(struct sockaddr_in),
1777 .bind_conflict = inet_csk_bind_conflict,
1778 #ifdef CONFIG_COMPAT
1779 .compat_setsockopt = compat_ip_setsockopt,
1780 .compat_getsockopt = compat_ip_getsockopt,
1781 #endif
1782 .mtu_reduced = tcp_v4_mtu_reduced,
1784 EXPORT_SYMBOL(ipv4_specific);
1786 #ifdef CONFIG_TCP_MD5SIG
1787 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1788 .md5_lookup = tcp_v4_md5_lookup,
1789 .calc_md5_hash = tcp_v4_md5_hash_skb,
1790 .md5_parse = tcp_v4_parse_md5_keys,
1792 #endif
1794 /* NOTE: A lot of things set to zero explicitly by call to
1795 * sk_alloc() so need not be done here.
1797 static int tcp_v4_init_sock(struct sock *sk)
1799 struct inet_connection_sock *icsk = inet_csk(sk);
1801 tcp_init_sock(sk);
1803 icsk->icsk_af_ops = &ipv4_specific;
1805 #ifdef CONFIG_TCP_MD5SIG
1806 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1807 #endif
1809 return 0;
1812 void tcp_v4_destroy_sock(struct sock *sk)
1814 struct tcp_sock *tp = tcp_sk(sk);
1816 tcp_clear_xmit_timers(sk);
1818 tcp_cleanup_congestion_control(sk);
1820 /* Clean up the write buffer. */
1821 tcp_write_queue_purge(sk);
1823 /* Cleans up our, hopefully empty, out_of_order_queue. */
1824 __skb_queue_purge(&tp->out_of_order_queue);
1826 #ifdef CONFIG_TCP_MD5SIG
1827 /* Clean up the MD5 key list, if any */
1828 if (tp->md5sig_info) {
1829 tcp_clear_md5_list(sk);
1830 kfree_rcu(tp->md5sig_info, rcu);
1831 tp->md5sig_info = NULL;
1833 #endif
1835 /* Clean prequeue, it must be empty really */
1836 __skb_queue_purge(&tp->ucopy.prequeue);
1838 /* Clean up a referenced TCP bind bucket. */
1839 if (inet_csk(sk)->icsk_bind_hash)
1840 inet_put_port(sk);
1842 BUG_ON(tp->fastopen_rsk != NULL);
1844 /* If socket is aborted during connect operation */
1845 tcp_free_fastopen_req(tp);
1847 sk_sockets_allocated_dec(sk);
1848 sock_release_memcg(sk);
1850 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1852 #ifdef CONFIG_PROC_FS
1853 /* Proc filesystem TCP sock list dumping. */
1856 * Get the next listener socket following cur. If cur is NULL, get the first socket
1857 * starting from bucket given in st->bucket; when st->bucket is zero the
1858 * very first socket in the hash table is returned.
1860 static void *listening_get_next(struct seq_file *seq, void *cur)
1862 struct inet_connection_sock *icsk;
1863 struct hlist_nulls_node *node;
1864 struct sock *sk = cur;
1865 struct inet_listen_hashbucket *ilb;
1866 struct tcp_iter_state *st = seq->private;
1867 struct net *net = seq_file_net(seq);
1869 if (!sk) {
1870 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1871 spin_lock_bh(&ilb->lock);
1872 sk = sk_nulls_head(&ilb->head);
1873 st->offset = 0;
1874 goto get_sk;
1876 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1877 ++st->num;
1878 ++st->offset;
1880 if (st->state == TCP_SEQ_STATE_OPENREQ) {
1881 struct request_sock *req = cur;
1883 icsk = inet_csk(st->syn_wait_sk);
1884 req = req->dl_next;
1885 while (1) {
1886 while (req) {
1887 if (req->rsk_ops->family == st->family) {
1888 cur = req;
1889 goto out;
1891 req = req->dl_next;
1893 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1894 break;
1895 get_req:
1896 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1898 sk = sk_nulls_next(st->syn_wait_sk);
1899 st->state = TCP_SEQ_STATE_LISTENING;
1900 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1901 } else {
1902 icsk = inet_csk(sk);
1903 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1904 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1905 goto start_req;
1906 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1907 sk = sk_nulls_next(sk);
1909 get_sk:
1910 sk_nulls_for_each_from(sk, node) {
1911 if (!net_eq(sock_net(sk), net))
1912 continue;
1913 if (sk->sk_family == st->family) {
1914 cur = sk;
1915 goto out;
1917 icsk = inet_csk(sk);
1918 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1919 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1920 start_req:
1921 st->uid = sock_i_uid(sk);
1922 st->syn_wait_sk = sk;
1923 st->state = TCP_SEQ_STATE_OPENREQ;
1924 st->sbucket = 0;
1925 goto get_req;
1927 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1929 spin_unlock_bh(&ilb->lock);
1930 st->offset = 0;
1931 if (++st->bucket < INET_LHTABLE_SIZE) {
1932 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1933 spin_lock_bh(&ilb->lock);
1934 sk = sk_nulls_head(&ilb->head);
1935 goto get_sk;
1937 cur = NULL;
1938 out:
1939 return cur;
1942 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1944 struct tcp_iter_state *st = seq->private;
1945 void *rc;
1947 st->bucket = 0;
1948 st->offset = 0;
1949 rc = listening_get_next(seq, NULL);
1951 while (rc && *pos) {
1952 rc = listening_get_next(seq, rc);
1953 --*pos;
1955 return rc;
1958 static inline bool empty_bucket(const struct tcp_iter_state *st)
1960 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1964 * Get first established socket starting from bucket given in st->bucket.
1965 * If st->bucket is zero, the very first socket in the hash is returned.
1967 static void *established_get_first(struct seq_file *seq)
1969 struct tcp_iter_state *st = seq->private;
1970 struct net *net = seq_file_net(seq);
1971 void *rc = NULL;
1973 st->offset = 0;
1974 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1975 struct sock *sk;
1976 struct hlist_nulls_node *node;
1977 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1979 /* Lockless fast path for the common case of empty buckets */
1980 if (empty_bucket(st))
1981 continue;
1983 spin_lock_bh(lock);
1984 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1985 if (sk->sk_family != st->family ||
1986 !net_eq(sock_net(sk), net)) {
1987 continue;
1989 rc = sk;
1990 goto out;
1992 spin_unlock_bh(lock);
1994 out:
1995 return rc;
1998 static void *established_get_next(struct seq_file *seq, void *cur)
2000 struct sock *sk = cur;
2001 struct hlist_nulls_node *node;
2002 struct tcp_iter_state *st = seq->private;
2003 struct net *net = seq_file_net(seq);
2005 ++st->num;
2006 ++st->offset;
2008 sk = sk_nulls_next(sk);
2010 sk_nulls_for_each_from(sk, node) {
2011 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2012 return sk;
2015 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2016 ++st->bucket;
2017 return established_get_first(seq);
2020 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2022 struct tcp_iter_state *st = seq->private;
2023 void *rc;
2025 st->bucket = 0;
2026 rc = established_get_first(seq);
2028 while (rc && pos) {
2029 rc = established_get_next(seq, rc);
2030 --pos;
2032 return rc;
2035 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2037 void *rc;
2038 struct tcp_iter_state *st = seq->private;
2040 st->state = TCP_SEQ_STATE_LISTENING;
2041 rc = listening_get_idx(seq, &pos);
2043 if (!rc) {
2044 st->state = TCP_SEQ_STATE_ESTABLISHED;
2045 rc = established_get_idx(seq, pos);
2048 return rc;
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
		break;
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
		/* Intentional fallthrough: the listening bucket lock is still held. */
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
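/*
 * Illustration (not part of the original file): the start/next/stop callbacks
 * above run once per read(2) on /proc/net/tcp. The sketch below is a minimal
 * user-space reader, assuming only that /proc/net/tcp exists; each small read
 * triggers one start/stop cycle, and the st->last_pos fast path in
 * tcp_seq_start()/tcp_seek_last_pos() lets the kernel resume the hash walk
 * instead of re-counting entries from bucket zero.
 */
#if 0 /* user-space sketch, kept out of the kernel build */
#include <stdio.h>

int main(void)
{
	char buf[1024];
	size_t n;
	FILE *f = fopen("/proc/net/tcp", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* deliberately small reads: each one maps to a start/stop cycle */
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}
#endif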
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family = afinfo->family;
	s->last_pos = 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start = tcp_seq_start;
	afinfo->seq_ops.next  = tcp_seq_next;
	afinfo->seq_ops.stop  = tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
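/*
 * Illustration: tcp_seq_open(), tcp_proc_register() and tcp_proc_unregister()
 * are exported because other address families reuse this iterator. Below is a
 * sketch of the IPv6 side, condensed from tcp_ipv6.c (names taken from that
 * file; details simplified, so treat it as orientation rather than a verbatim
 * copy).
 */
#if 0 /* sketch of the tcp_ipv6.c usage, not built here */
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,	/* the shared open above */
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,	/* AF_INET6 row formatter */
	},
};

static int __net_init tcp6_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}
#endif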
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	/*
	 * Timer codes reported in the "tr" column: 0 none, 1 retransmit
	 * (also early retransmit / loss probe), 2 keepalive, 4 zero-window
	 * probe; 3 is emitted for TIME_WAIT sockets in get_timewait4_sock().
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	__be32 dest, src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}
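/*
 * Illustration: the address fields above are raw __be32 values printed with
 * %08X, so on a little-endian host they read byte-swapped, while the ports
 * are converted with ntohs() first. The sketch below decodes one such field
 * in user space; decode_addr() is a hypothetical helper, not kernel code,
 * and a little-endian host is assumed.
 */
#if 0 /* user-space sketch, little-endian host assumed */
#include <stdio.h>
#include <arpa/inet.h>

static void decode_addr(const char *field)
{
	unsigned int addr, port;
	struct in_addr in;

	if (sscanf(field, "%x:%x", &addr, &port) == 2) {
		in.s_addr = addr;	/* hex is the raw in-memory value here */
		printf("%s:%u\n", inet_ntoa(in), port);
	}
}

int main(void)
{
	decode_addr("0100007F:0016");	/* prints 127.0.0.1:22 */
	return 0;
}
#endif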
#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait4_sock(v, seq, st->num);
		else
			get_tcp4_sock(v, seq, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
		break;
	}
out:
	seq_pad(seq, '\n');
	return 0;
}
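/*
 * Illustration: seq_setwidth(seq, TMPSZ - 1) plus the seq_pad(seq, '\n')
 * above pad every line, header included, to exactly TMPSZ bytes. Under that
 * assumption, a user-space sketch can consume the file record by record:
 */
#if 0 /* user-space sketch, not built here */
#include <stdio.h>

#define TMPSZ 150

int main(void)
{
	char line[TMPSZ + 1];
	FILE *f = fopen("/proc/net/tcp", "r");

	if (!f)
		return 1;
	while (fread(line, 1, TMPSZ, f) == TMPSZ) {
		line[TMPSZ] = '\0';
		fputs(line, stdout);	/* first record is the column header */
	}
	fclose(f);
	return 0;
}
#endif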
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
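/*
 * Illustration: socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) reaches tcp_prot
 * through the protocol switch table in af_inet.c. The sketch below is a
 * condensed, field-subset version of that inetsw_array entry; consult
 * af_inet.c for the authoritative table.
 */
#if 0 /* condensed from af_inet.c, not built here */
static struct inet_protosw tcp_protosw_sketch = {
	.type     = SOCK_STREAM,
	.protocol = IPPROTO_TCP,
	.prot     = &tcp_prot,		/* the proto defined above */
	.ops      = &inet_stream_ops,	/* AF_INET stream proto_ops */
	.flags    = INET_PROTOSW_PERMANENT | INET_PROTOSW_ICSK,
};
#endif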
static int __net_init tcp_sk_init(struct net *net)
{
	/* 2: echo ECN when the peer requests it, but don't request it ourselves */
	net->ipv4.sysctl_tcp_ecn = 2;
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}
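/*
 * Illustration: tcp_v4_init() runs early from inet_init() in af_inet.c,
 * before tcp_init() allocates the established/bind hash tables into the
 * tcp_hashinfo initialized above. The ordering below is an assumption drawn
 * from af_inet.c, shown for orientation only (error handling omitted).
 */
#if 0 /* condensed from af_inet.c's inet_init() */
	ip_init();
	tcp_v4_init();	/* this file: hashinfo init + pernet registration */
	tcp_init();	/* net/ipv4/tcp.c: hash tables, caches, limits */
#endif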