// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 elapsed, start_ts;
	s32 remaining;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (!icsk->icsk_user_timeout)
		return icsk->icsk_rto;
	elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
	remaining = icsk->icsk_user_timeout - elapsed;
	if (remaining <= 0)
		return 1; /* user timeout has passed; fire ASAP */

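	/* Example (assuming HZ = 1000): with a 10 s user timeout, 9.5 s
	 * already elapsed and icsk_rto = 800 jiffies, remaining = 500 ms,
	 * so the timer is re-armed after min(800, 500) = 500 jiffies and
	 * cannot overshoot the user-requested deadline.
	 */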
	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}

/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */
static void tcp_write_err(struct sock *sk)
{
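	/* Prefer a pending soft error (typically reported by ICMP) over a
	 * generic ETIMEDOUT.
	 */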
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_write_queue_purge(sk);
	tcp_done(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is a direct violation of the TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on an orphaned socket.
 *
 *  Also close if our net namespace is exiting; in that case there is no
 *  hope of ever communicating again since all netns interfaces are already
 *  down (or about to be down), and we need to release our dst references,
 *  which have been moved to the netns loopback interface, so the namespace
 *  can finish exiting.  This condition is only possible if we are a kernel
 *  socket, as those do not hold references to the namespace.
 *
 *  The criteria are still not confirmed experimentally and may change.
 *  We kill the socket if:
 *  1. The number of orphaned sockets exceeds an administratively configured
 *     limit.
 *  2. We are under strong memory pressure.
 *  3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

/**
 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	const struct net *net = sock_net(sk);
	int mss;

	/* Black hole detection */
	if (!net->ipv4.sysctl_tcp_mtu_probing)
		return;

	if (!icsk->icsk_mtup.enabled) {
		icsk->icsk_mtup.enabled = 1;
		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
	} else {
		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
		mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
		mss = max(mss, net->ipv4.sysctl_tcp_mtu_probe_floor);
		mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
	}
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}

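/* tcp_model_timeout() below models the total time consumed by @boundary
 * exponentially backed-off retransmissions starting from @rto_base and
 * clamped at TCP_RTO_MAX per step; the result is in milliseconds.
 * Example: rto_base = TCP_RTO_MIN (200 ms) and boundary = 8 give
 * ((2 << 8) - 1) * 200 ms = 102.2 s -- the ">100 seconds" mentioned in
 * tcp_orphan_retries() above.
 */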
static unsigned int tcp_model_timeout(struct sock *sk,
				      unsigned int boundary,
				      unsigned int rto_base)
{
	unsigned int linear_backoff_thresh, timeout;

	linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * rto_base;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	return jiffies_to_msecs(timeout);
}

/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value.
 *             If set to 0 the default timeout is calculated and used.
 *             Using TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 *  The default "timeout" value this function can calculate and use
 *  is equivalent to the timeout of a TCP Connection
 *  after "boundary" unsuccessful, exponentially backed-off
 *  retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	unsigned int start_ts;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (likely(timeout == 0)) {
		unsigned int rto_base = TCP_RTO_MIN;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			rto_base = tcp_timeout_init(sk);
		timeout = tcp_model_timeout(sk, boundary, rto_base);
	}

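	/* The (s32) cast makes the comparison safe across wraparound of
	 * the tcp_time_stamp() clock, in the spirit of time_after().
	 */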
	return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
}

/* A write timeout has occurred. Process the after effects. */
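/* Returns 1 if the socket was killed here (by tcp_write_err() or
 * tcp_out_of_resources()), in which case the caller must not touch it
 * again; returns 0 if retransmission should proceed.
 */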
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired = false, do_reset;
	int retry_until;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits) {
			dst_negative_advice(sk);
		} else {
			sk_rethink_txhash(sk);
			tp->timeout_rehash++;
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPTIMEOUTREHASH);
		}
		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
		expired = icsk->icsk_retransmits >= retry_until;
	} else {
		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		} else {
			sk_rethink_txhash(sk);
			tp->timeout_rehash++;
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPTIMEOUTREHASH);
		}

		retry_until = net->ipv4.sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}
	if (!expired)
		expired = retransmits_timed_out(sk, retry_until,
						icsk->icsk_user_timeout);
	tcp_fastopen_active_detect_blackhole(sk, expired);

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
				  icsk->icsk_retransmits,
				  icsk->icsk_rto, (int)expired);

	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}

	return 0;
}

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_mstamp_refresh(tcp_sk(sk));
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}

/**
 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 *  @t:  Pointer to the timer whose expiry triggered this call; the
 *       owning socket is recovered from it via from_timer().
 *
 *  This function gets (indirectly) called when the kernel timer for a TCP packet
 *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 *  Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		icsk->icsk_ack.blocked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !skb) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement similar policy when
	 * we use RTO to probe window in tcp_retransmit_timer().
	 */
	if (icsk->icsk_user_timeout) {
		u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
						tcp_probe0_base(sk));
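
		/* Note: "elapsed" is modeled from the number of probes
		 * already sent, via tcp_model_timeout(), rather than read
		 * from a stored timestamp.
		 */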
		if (elapsed >= icsk->icsk_user_timeout)
			goto abort;
	}

	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out >= max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct tcp_sock *tp = tcp_sk(sk);

	req->rsk_ops->syn_ack_timeout(req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
	if (icsk->icsk_retransmits == 1)
		tcp_enter_loss(sk);
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	icsk->icsk_retransmits++;
	if (!tp->retrans_stamp)
		tp->retrans_stamp = tcp_time_stamp(tp);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}

/**
 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 *  @sk:  Pointer to the current socket.
 *
 *  This function gets called when the kernel timer for a TCP packet
 *  of this socket expires.
 *
 *  It handles retransmission, timer adjustment and other necessary measures.
 *
 *  Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *req;
	struct sk_buff *skb;

	req = rcu_dereference_protected(tp->fastopen_rsk,
					lockdep_sock_is_held(sk));
	if (req) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk, req);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}

	if (!tp->packets_out)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, skb, 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx = 0;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		}
		if (mib_idx)
			__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	icsk->icsk_retransmits++;
	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion;
		 * let senders fight for local resources conservatively.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  TCP_RESOURCE_PROBE_INTERVAL,
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour, to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
	if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
		__sk_dst_reset(sk);

out:;
}

/* Called with bottom-half processing disabled.
   Called by tcp_write_timer() */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}

static void tcp_write_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);

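/* With the default sysctls (tcp_keepalive_time = 7200 s,
 * tcp_keepalive_intvl = 75 s, tcp_keepalive_probes = 9), the handler
 * below first fires two hours after the last activity and then sends
 * up to nine probes, 75 seconds apart, before the connection is reset.
 */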
static void tcp_keepalive_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	tcp_mstamp_refresh(tp);
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || !tcp_write_queue_empty(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
	struct sock *sk = (struct sock *)tp;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		if (tp->compressed_ack) {
			/* Since we have to send one ack finally,
			 * subtract one from tp->compressed_ack to keep
			 * LINUX_MIB_TCPACKCOMPRESSED accurate.
			 */
			tp->compressed_ack--;
			tcp_send_ack(sk);
		}
	} else {
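		/* Owned by user context: defer the work to tcp_release_cb()
		 * by setting TCP_DELACK_TIMER_DEFERRED, the same flag used
		 * by tcp_delack_timer().
		 */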
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return HRTIMER_NORESTART;
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED_SOFT);
	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;

	hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED_SOFT);
	tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}
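
/* tcp_init_xmit_timers() is invoked when a TCP socket is initialized
 * (from tcp_init_sock()); it only installs the handlers -- none of the
 * timers is armed here.
 */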