net/ipv4/tcp_recovery.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOSS_DETECTION;
static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
			      tcp_skb_pcount(skb));
	}
}
static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}
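/* Illustration (hypothetical values, not from the source): two skbs sent
 * in the same TSO burst can share a send timestamp, so ties are broken in
 * sequence space. With t1 == t2, tcp_rack_sent_after(t, t, 3000, 2000) is
 * true: the skb ending at seq 3000 counts as sent after the one ending at
 * seq 2000.
 */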
/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and is therefore robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	u32 reo_wnd;

	*reo_timeout = 0;
	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000uS). We use min_rtt instead of the smoothed
	 * RTT because reordering is often a path property and less related
	 * to queuing or delayed ACKs.
	 */
	reo_wnd = 1000;
	if ((tp->rack.reord || !tp->lost_out) && tcp_min_rtt(tp) != ~0U)
		reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
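	/* Example (hypothetical numbers): with a 20 ms min_rtt, reo_wnd is
	 * max(20000 >> 2, 1000) = 5000 us; with a 2 ms min_rtt the shift
	 * yields only 500 us, so the 1000 us floor applies instead.
	 */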
	tcp_for_write_queue(skb, sk) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);

		if (skb == tcp_send_head(sk))
			break;

		/* Skip ones already (s)acked */
		if (!after(scb->end_seq, tp->snd_una) ||
		    scb->sacked & TCPCB_SACKED_ACKED)
			continue;

		if (tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
					tp->rack.end_seq, scb->end_seq)) {
			/* Step 3 in draft-cheng-tcpm-rack-00.txt:
			 * A packet is lost if its elapsed time is beyond
			 * the recent RTT plus the reordering window.
			 */
			u32 elapsed = tcp_stamp_us_delta(tp->tcp_mstamp,
							 skb->skb_mstamp);
			s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;
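			/* Example (hypothetical numbers): with
			 * rack.rtt_us = 40000, reo_wnd = 5000 and
			 * elapsed = 50000, remaining is -5000 and the
			 * skb is marked lost below; with elapsed = 42000
			 * it survives and instead contributes a
			 * 3000 us (+1) candidate to *reo_timeout.
			 */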
			if (remaining < 0) {
				tcp_rack_mark_skb_lost(sk, skb);
				continue;
			}

			/* Skip ones marked lost but not yet retransmitted */
			if ((scb->sacked & TCPCB_LOST) &&
			    !(scb->sacked & TCPCB_SACKED_RETRANS))
				continue;

			/* Record maximum wait time (+1 to avoid 0) */
			*reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);

		} else if (!(scb->sacked & TCPCB_RETRANS)) {
			/* Original data are sent sequentially so stop early
			 * because the rest are all sent after rack_sent.
			 */
			break;
		}
	}
}
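/* Worked walkthrough (hypothetical scenario, not from the source): P1
 * (seq 1:1001) is sent at t = 0 and never acked; P2 (seq 1001:2001) is
 * sent at t = 10 ms and SACKed, advancing rack.mstamp to P2's send time
 * with rack.rtt_us = 40 ms. P1 was sent before the latest delivered
 * packet, so tcp_rack_detect_loss() checks it: once tp->tcp_mstamp minus
 * P1's send time exceeds 40 ms + reo_wnd, P1 is marked lost without
 * waiting for three duplicate ACKs. This is what makes RACK effective
 * for tail losses, where no further dupACKs will ever arrive.
 */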
void tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}
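/* Note on the arming above (illustrative, assuming HZ = 1000): a 3000 us
 * reo_timeout converts to 3 jiffies, padded by TCP_TIMEOUT_MIN, while the
 * icsk_rto argument caps the expiry, so the reordering timer never fires
 * later than the RTO would have.
 */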
/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	if (tp->rack.mstamp &&
	    !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				 end_seq, tp->rack.end_seq))
		return;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (sacked & TCPCB_RETRANS) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or the prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
		 */
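		/* Example (hypothetical numbers): if min_rtt is 20 ms and
		 * the SACK arrives 5 ms after the retransmission was sent,
		 * rtt_us = 5000 < 20000, so the SACK almost certainly
		 * covers the original transmission; advancing RACK state
		 * from it would underestimate the RTT, hence the bail-out
		 * below.
		 */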
		if (rtt_us < tcp_min_rtt(tp))
			return;
	}
	tp->rack.rtt_us = rtt_us;
	tp->rack.mstamp = xmit_time;
	tp->rack.end_seq = end_seq;
	tp->rack.advanced = 1;
}
/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}