// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>
static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
			      tcp_skb_pcount(skb));
	}
}
static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}
/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and therefore is robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
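/* A worked example of the time-domain rule above (illustrative numbers,
 * not from the draft): P1 is sent at t = 0 ms and P2 at t = 10 ms, and P2
 * is SACKed. With RACK.rtt = 40 ms and a 10 ms reordering window, P1 is
 * declared lost once it is still not (s)acked by t = 0 + 40 + 10 = 50 ms,
 * regardless of how many duplicate ACKs have arrived.
 */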
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 min_rtt = tcp_min_rtt(tp);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000uS). We use min_rtt instead of the smoothed
	 * RTT because reordering is often a path property and less related
	 * to queuing or delayed ACKs.
	 */
	reo_wnd = 1000;
	if ((tp->rack.reord || inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery) &&
	    min_rtt != ~0U) {
		reo_wnd = max((min_rtt >> 2) * tp->rack.reo_wnd_steps, reo_wnd);
		reo_wnd = min(reo_wnd, tp->srtt_us >> 3);
	}

	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
					 tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked beyond
		 * the recent RTT plus the reordering window.
		 */
		remaining = tp->rack.rtt_us + reo_wnd -
			    tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
		if (remaining <= 0) {
			tcp_rack_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}
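/* Note on the reordering window computed above: with reo_wnd_steps = 1 and
 * min_rtt = 20 ms (illustrative), reo_wnd = max(20 ms / 4, 1000 us) = 5 ms,
 * further capped at the smoothed RTT (srtt_us >> 3). Each skb then gets
 * rack.rtt_us + reo_wnd from its last (re)transmit time before it is marked
 * lost, and the largest remaining wait is returned via *reo_timeout.
 */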
void tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}
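/* Timer interplay: when some packets have not yet expired, the timeout
 * computed by tcp_rack_detect_loss() arms ICSK_TIME_REO_TIMEOUT above, and
 * tcp_rack_reo_timeout() below runs when that timer fires to re-evaluate
 * the queue.
 */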
/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or the prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}
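/* Example of the RTT filter above (hypothetical numbers): if tcp_min_rtt()
 * is 100 ms but a (s)acked TCPCB_RETRANS skb measures rtt_us = 30 ms from
 * its last (re)transmit time, the ACK almost certainly covers the original
 * transmission, so the sample is too ambiguous to advance the RACK state.
 */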
/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}
/* Updates the RACK's reo_wnd based on DSACK and the number of recoveries.
 *
 * If a DSACK is received, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since a spurious retransmission may have been caused by a
 * reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (accounting for a full DSACK-based loss recovery
 * undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the DSACK we are
 * reacting to is (approximately) due to a spurious retransmission sent
 * after the last reo_wnd update.
 *
 * reo_wnd is tracked in terms of steps (of min_rtt/4) rather than as an
 * absolute value, to account for changes in the RTT.
 */
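/* Illustrative step accounting (hypothetical numbers): starting from
 * reo_wnd_steps = 1, two DSACKs arriving at least one RTT apart raise the
 * step count to 3, so the reordering window grows to 3 * (min_rtt / 4),
 * still capped at srtt. The larger window persists for the next 16
 * successful recoveries and then falls back to the default single step.
 */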
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
	    !rs->prior_delivered)
		return;

	/* Disregard the DSACK if an RTT has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if an update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}