net/ipv4/tcp_recovery.c
#include <linux/tcp.h>
#include <net/tcp.h>

int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOSS_DETECTION;

static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
			      tcp_skb_pcount(skb));
	}
}
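
/* Returns true if the skb timestamped t1 was sent after the one
 * timestamped t2, falling back to sequence numbers to break the tie
 * when both carry an identical skb_mstamp (e.g. two packets sent
 * within the same microsecond).
 */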
static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
				const struct skb_mstamp *t2,
				u32 seq1, u32 seq2)
{
	return skb_mstamp_after(t1, t2) ||
	       (t1->v64 == t2->v64 && after(seq1, seq2));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is it applies to both original and retransmitted
 * packets and therefore is robust against tail losses. Another advantage
 * is being more resilient to reordering by simply allowing some
 * "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
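/* Worked timeline (hypothetical numbers, not from the draft): P1 is
 * sent at t=0ms and P2 at t=1ms; a SACK for P2 arrives at t=11ms,
 * measuring RACK.RTT as 10ms. With a 2ms reordering window, P1 is
 * declared lost once t >= 0ms + 10ms + 2ms = 12ms, i.e. 1ms after P2's
 * SACK, without waiting for three duplicate ACKs.
 */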
static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now,
				 u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	u32 reo_wnd;

	*reo_timeout = 0;
	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000us). We use min_rtt instead of the smoothed
	 * RTT because reordering is often a path property and less related
	 * to queuing or delayed ACKs.
	 */
	reo_wnd = 1000;
	if ((tp->rack.reord || !tp->lost_out) && tcp_min_rtt(tp) != ~0U)
		reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
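
	/* Example (hypothetical numbers): with a 40000us min_rtt the window
	 * becomes 40000 >> 2 == 10000us; on a path with min_rtt below 4000us
	 * the 1000us floor wins instead. Note the bump applies only when
	 * reordering has been observed or nothing is marked lost yet.
	 */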

	tcp_for_write_queue(skb, sk) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);

		if (skb == tcp_send_head(sk))
			break;

		/* Skip ones already (s)acked */
		if (!after(scb->end_seq, tp->snd_una) ||
		    scb->sacked & TCPCB_SACKED_ACKED)
			continue;

		if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp,
					tp->rack.end_seq, scb->end_seq)) {
			/* Step 3 in draft-cheng-tcpm-rack-00.txt:
			 * A packet is lost if its elapsed time is beyond
			 * the recent RTT plus the reordering window.
			 */
			u32 elapsed = skb_mstamp_us_delta(now,
							  &skb->skb_mstamp);
			s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;
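
			/* Worked example (hypothetical numbers): with
			 * rack.rtt_us == 10000, reo_wnd == 2500 and an skb
			 * sent 14000us ago, remaining == -1500 and the skb
			 * is marked lost below; for an skb sent 11000us
			 * ago, remaining == 1500 instead feeds the
			 * reo_timeout computed further down.
			 */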

			if (remaining < 0) {
				tcp_rack_mark_skb_lost(sk, skb);
				continue;
			}

			/* Skip ones marked lost but not yet retransmitted */
			if ((scb->sacked & TCPCB_LOST) &&
			    !(scb->sacked & TCPCB_SACKED_RETRANS))
				continue;

			/* Record maximum wait time (+1 to avoid 0) */
			*reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);

		} else if (!(scb->sacked & TCPCB_RETRANS)) {
			/* Original data are sent sequentially, so stop early
			 * because the rest are all sent after rack_sent
			 */
			break;
		}
	}
}

void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, now, &timeout);
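	/* A positive timeout means some packets are still within the
	 * reordering window but will expire soon: arm the RACK reordering
	 * timer (capped by the RTO) so tcp_rack_reo_timeout() can
	 * re-evaluate them when it fires.
	 */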
	if (timeout) {
		timeout = usecs_to_jiffies(timeout + TCP_REO_TIMEOUT_MIN);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt.
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      const struct skb_mstamp *xmit_time,
		      const struct skb_mstamp *ack_time)
{
	u32 rtt_us;

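	/* Only a (s)acked packet sent after the currently recorded one can
	 * advance the RACK state; an older transmission carries no new
	 * ordering information.
	 */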
	if (tp->rack.mstamp.v64 &&
	    !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp,
				 end_seq, tp->rack.end_seq))
		return;

	rtt_us = skb_mstamp_us_delta(ack_time, xmit_time);
	if (sacked & TCPCB_RETRANS) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or a prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
		 */
		if (rtt_us < tcp_min_rtt(tp))
			return;
	}
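
	/* Example (hypothetical numbers): with a 20000us min_rtt, a SACK
	 * arriving 5000us after a retransmission most likely acknowledges
	 * the original transmission, so the ambiguous 5000us measurement
	 * is discarded above rather than used to shrink RACK.RTT.
	 */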
	tp->rack.rtt_us = rtt_us;
	tp->rack.mstamp = *xmit_time;
	tp->rack.end_seq = end_seq;
	tp->rack.advanced = 1;
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct skb_mstamp now;
	u32 timeout, prior_inflight;

	skb_mstamp_get(&now);
	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &now, &timeout);
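	/* Any skb newly marked lost above increases tp->lost_out and thus
	 * shrinks tcp_packets_in_flight(), so comparing the before/after
	 * values is a cheap way to detect whether the scan found new
	 * losses.
	 */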
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}