// SPDX-License-Identifier: GPL-2.0-only
/* WARNING: This implementation is not necessarily the same
 * as the kernel's tcp_cubic.c. The purpose is mainly for testing
 * the kernel BPF logic.
 *
 * Highlights:
 * 1. CONFIG_HZ .kconfig map is used.
 * 2. In bictcp_update(), the calculation is changed to use usec
 *    resolution (i.e. USEC_PER_JIFFY) instead of using jiffies.
 *    Thus, usecs_to_jiffies() is not used in bpf_cubic.c.
 * 3. In bictcp_update() [under tcp_friendliness], the original
 *    "while (ca->ack_cnt > delta)" loop is changed to the equivalent
 *    "ca->ack_cnt / delta" operation.
 */
#include <linux/bpf.h>
#include "bpf_tcp_helpers.h"
char _license[] SEC("license") = "GPL";
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
#define BICTCP_BETA_SCALE	1024	/* Scale factor beta calculation
					 * max_cwnd = snd_cwnd * beta
					 */
#define BICTCP_HZ		10	/* BIC HZ 2^10 = 1024 */
/* Two methods of hybrid slow start */
#define HYSTART_ACK_TRAIN	0x1
#define HYSTART_DELAY		0x2

/* Number of delay samples for detecting the increase of delay */
#define HYSTART_MIN_SAMPLES	8
#define HYSTART_DELAY_MIN	(4000U)		/* 4 ms */
#define HYSTART_DELAY_MAX	(16000U)	/* 16 ms */
#define HYSTART_DELAY_THRESH(x)	clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)
static int fast_convergence = 1;
static const int beta = 717;	/* = 717/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh;
static const int bic_scale = 41;
static int tcp_friendliness = 1;

static int hystart = 1;
static int hystart_detect = HYSTART_ACK_TRAIN | HYSTART_DELAY;
static int hystart_low_window = 16;
static int hystart_ack_delta_us = 2000;
static const __u32 cube_rtt_scale = (bic_scale * 10);	/* 1024*c/rtt */
static const __u32 beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3
				/ (BICTCP_BETA_SCALE - beta);
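/* Worked out in integer arithmetic with beta = 717:
 *	beta_scale = 8 * (1024 + 717) / 3 / (1024 - 717)
 *	           = 13928 / 3 / 307 = 4642 / 307 = 15
 * so the TCP-friendly estimate below bumps tcp_cwnd once per
 * (cwnd * 15) >> 3 ~= 1.9 * cwnd ACKs, which matches the AIMD fairness
 * rate 3*(1-beta')/(1+beta') ~= 0.53 increments per RTT for beta' = 0.7.
 */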
/* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
 * so K = cubic_root( (wmax-cwnd)*rtt/c )
 * the unit of K is bictcp_HZ=2^10, not HZ
 *
 * the following code has been designed and tested for
 * cwnd < 1 million packets
 * RTT < 100 seconds
 * HZ < 1,000,000 (corresponding to 1 micro-second resolution)
 */
/* 1/c * 2^2*bictcp_HZ * srtt, 2^40 */
static const __u64 cube_factor = (__u64)(1ull << (10+3*BICTCP_HZ))
				/ (bic_scale * 10);
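/* Sanity check on the constant above: with bic_scale = 41,
 *	cube_factor = 2^(10 + 3*10) / 410 = 2^40 / 410 ~= 2.68e9
 * so bic_K below comes out of cubic_root() in bictcp_HZ units,
 * where 2^10 units correspond to one second.
 */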
/* BIC TCP Parameters */
struct bictcp {
	__u32	cnt;		/* increase cwnd by 1 after ACKs */
	__u32	last_max_cwnd;	/* last maximum snd_cwnd */
	__u32	last_cwnd;	/* the last snd_cwnd */
	__u32	last_time;	/* time when updated last_cwnd */
	__u32	bic_origin_point;/* origin point of bic function */
	__u32	bic_K;		/* time to origin point
				 * from the beginning of the current epoch
				 */
	__u32	delay_min;	/* min delay (usec) */
	__u32	epoch_start;	/* beginning of an epoch */
	__u32	ack_cnt;	/* number of acks */
	__u32	tcp_cwnd;	/* estimated tcp cwnd */
	__u8	sample_cnt;	/* number of samples to decide curr_rtt */
	__u8	found;		/* the exit point is found? */
	__u32	round_start;	/* beginning of each round */
	__u32	end_seq;	/* end_seq of the round */
	__u32	last_ack;	/* last time when the ACK spacing is close */
	__u32	curr_rtt;	/* the minimum rtt of current round */
};
static inline void bictcp_reset(struct bictcp *ca)
{
	ca->cnt = 0;
	ca->last_max_cwnd = 0;
	ca->last_cwnd = 0;
	ca->last_time = 0;
	ca->bic_origin_point = 0;
	ca->bic_K = 0;
	ca->delay_min = 0;
	ca->epoch_start = 0;
	ca->ack_cnt = 0;
	ca->tcp_cwnd = 0;
	ca->found = 0;
}
extern unsigned long CONFIG_HZ __kconfig;
#define HZ CONFIG_HZ
#define USEC_PER_MSEC	1000UL
#define USEC_PER_SEC	1000000UL
#define USEC_PER_JIFFY	(USEC_PER_SEC / HZ)
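/* Example: on a CONFIG_HZ=1000 build, USEC_PER_JIFFY is
 * 1000000 / 1000 = 1000 usec, so jiffies deltas below convert to usec
 * with a single multiply instead of calling usecs_to_jiffies().
 */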
static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
{
	return dividend / divisor;
}

#define div64_ul div64_u64
#define BITS_PER_U64 (sizeof(__u64) * 8)
static __always_inline int fls64(__u64 x)
{
	int num = BITS_PER_U64 - 1;

	if (!x)
		return 0;

	if (!(x & (~0ull << (BITS_PER_U64-32)))) {
		num -= 32;
		x <<= 32;
	}
	if (!(x & (~0ull << (BITS_PER_U64-16)))) {
		num -= 16;
		x <<= 16;
	}
	if (!(x & (~0ull << (BITS_PER_U64-8)))) {
		num -= 8;
		x <<= 8;
	}
	if (!(x & (~0ull << (BITS_PER_U64-4)))) {
		num -= 4;
		x <<= 4;
	}
	if (!(x & (~0ull << (BITS_PER_U64-2)))) {
		num -= 2;
		x <<= 2;
	}
	if (!(x & (~0ull << (BITS_PER_U64-1))))
		num -= 1;

	return num + 1;
}
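/* fls64() returns the 1-based index of the most significant set bit:
 * fls64(0) = 0, fls64(1) = 1, fls64(8) = 4, fls64(~0ull) = 64.
 * The if-chain above is an unrolled binary search, open-coded here
 * because the kernel's fls64() helper is not callable from BPF.
 */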
static __always_inline __u32 bictcp_clock_us(const struct sock *sk)
{
	return tcp_sk(sk)->tcp_mstamp;
}
static __always_inline void bictcp_hystart_reset(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	ca->round_start = ca->last_ack = bictcp_clock_us(sk);
	ca->end_seq = tp->snd_nxt;
	ca->curr_rtt = ~0U;
	ca->sample_cnt = 0;
}
170 /* "struct_ops/" prefix is not a requirement
171 * It will be recognized as BPF_PROG_TYPE_STRUCT_OPS
172 * as long as it is used in one of the func ptr
173 * under SEC(".struct_ops").
175 SEC("struct_ops/bictcp_init")
176 void BPF_PROG(bictcp_init
, struct sock
*sk
)
178 struct bictcp
*ca
= inet_csk_ca(sk
);
183 bictcp_hystart_reset(sk
);
185 if (!hystart
&& initial_ssthresh
)
186 tcp_sk(sk
)->snd_ssthresh
= initial_ssthresh
;
/* No prefix in SEC will also work.
 * The remaining tcp-cubic functions have an easier way.
 */
SEC("no-sec-prefix-bictcp_cwnd_event")
void BPF_PROG(bictcp_cwnd_event, struct sock *sk, enum tcp_ca_event event)
{
	if (event == CA_EVENT_TX_START) {
		struct bictcp *ca = inet_csk_ca(sk);
		__u32 now = tcp_jiffies32;
		__s32 delta;

		delta = now - tcp_sk(sk)->lsndtime;

		/* We were application limited (idle) for a while.
		 * Shift epoch_start to keep cwnd growth to cubic curve.
		 */
		if (ca->epoch_start && delta > 0) {
			ca->epoch_start += delta;
			if (after(ca->epoch_start, now))
				ca->epoch_start = now;
		}
	}
}
/*
 * cbrt(x) MSB values for x MSB values in [0..63].
 * Precomputed then refined by hand - Willy Tarreau
 *
 * For x in [0..63],
 *   v = cbrt(x << 18) - 1
 *   cbrt(x) = (v[x] + 10) >> 6
 */
static const __u8 v[] = {
	/* 0x00 */    0,   54,   54,   54,  118,  118,  118,  118,
	/* 0x08 */  123,  129,  134,  138,  143,  147,  151,  156,
	/* 0x10 */  157,  161,  164,  168,  170,  173,  176,  179,
	/* 0x18 */  181,  185,  187,  190,  192,  194,  197,  199,
	/* 0x20 */  200,  202,  204,  206,  209,  211,  213,  215,
	/* 0x28 */  217,  219,  221,  222,  224,  225,  227,  229,
	/* 0x30 */  231,  232,  234,  236,  237,  239,  240,  242,
	/* 0x38 */  244,  245,  246,  248,  250,  251,  252,  254,
};
/* calculate the cubic root of x using a table lookup followed by one
 * Newton-Raphson iteration.
 */
static __always_inline __u32 cubic_root(__u64 a)
{
	__u32 x, b, shift;

	b = fls64(a);
	if (b < 7) {
		/* a in [0..63] */
		return ((__u32)v[(__u32)a] + 35) >> 6;
	}

	b = ((b * 84) >> 8) - 1;
	shift = (a >> (b * 3));

	/* it is needed for verifier's bound check on v */
	if (shift >= 64)
		return 0;

	x = ((__u32)(((__u32)v[shift] + 10) << b)) >> 6;

	/*
	 * Newton-Raphson iteration:
	 *   x(k+1) = ( 2 * x(k) + a / x(k)^2 ) / 3
	 */
	x = (2 * x + (__u32)div64_u64(a, (__u64)x * (__u64)(x - 1)));
	x = ((x * 341) >> 10);
	return x;
}
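/* The multiply by 341 folds the iteration's divide-by-3 into fixed
 * point (341/1024 ~= 1/3), and x*(x-1) stands in for x^2. Example:
 * cubic_root(1000) looks up x = 10, then iterates to
 * (2*10 + 1000/90) * 341 >> 10 = 10, the exact cube root.
 */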
/*
 * Compute congestion window to use.
 */
static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
					  __u32 acked)
{
	__u32 delta, bic_target, max_cnt;
	__u64 offs, t;
	ca->ack_cnt += acked;	/* count the number of ACKed packets */
	if (ca->last_cwnd == cwnd &&
	    (__s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
		return;

	/* The CUBIC function can update ca->cnt at most once per jiffy.
	 * On all cwnd reduction events, ca->epoch_start is set to 0,
	 * which will force a recalculation of ca->cnt.
	 */
	if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
		goto tcp_friendliness;
	ca->last_cwnd = cwnd;
	ca->last_time = tcp_jiffies32;
	if (ca->epoch_start == 0) {
		ca->epoch_start = tcp_jiffies32;	/* record beginning */
		ca->ack_cnt = acked;			/* start counting */
		ca->tcp_cwnd = cwnd;			/* syn with cubic */

		if (ca->last_max_cwnd <= cwnd) {
			ca->bic_K = 0;
			ca->bic_origin_point = cwnd;
		} else {
			/* Compute new K based on
			 * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ)
			 */
			ca->bic_K = cubic_root(cube_factor
					       * (ca->last_max_cwnd - cwnd));
			ca->bic_origin_point = ca->last_max_cwnd;
		}
	}
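	/* Illustrative numbers (assumed, not from the original): losing
	 * 100 packets off wmax gives
	 *	bic_K = cubic_root(2.68e9 * 100) ~= 6452,
	 * i.e. 6452 / 2^10 ~= 6.3 seconds until the cubic curve returns
	 * to its origin point at the old wmax.
	 */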
	/* cubic function - calc */
	/* calculate c * time^3 / rtt,
	 * while considering overflow in calculation of time^3
	 * (so time^3 is done by using 64 bit)
	 * and without the support of division of 64bit numbers
	 * (so all divisions are done by using 32 bit)
	 * also NOTE the unit of those variables
	 *	time = (t - K) / 2^bictcp_HZ
	 *	c = bic_scale >> 10
	 *	rtt = (srtt >> 3) / HZ
	 * !!! The following code does not have overflow problems,
	 * if the cwnd < 1 million packets !!!
	 */

	t = (__s32)(tcp_jiffies32 - ca->epoch_start) * USEC_PER_JIFFY;
	t += ca->delay_min;
	/* change the unit from usec to bictcp_HZ */
	t <<= BICTCP_HZ;
	t /= USEC_PER_SEC;
	if (t < ca->bic_K)		/* t - K */
		offs = ca->bic_K - t;
	else
		offs = t - ca->bic_K;

	/* c/rtt * (t-K)^3 */
	delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
	if (t < ca->bic_K)		/* below origin */
		bic_target = ca->bic_origin_point - delta;
	else				/* above origin */
		bic_target = ca->bic_origin_point + delta;
	/* cubic function - calc bictcp_cnt */
	if (bic_target > cwnd)
		ca->cnt = cwnd / (bic_target - cwnd);
	else
		ca->cnt = 100 * cwnd;	/* very small increment */
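	/* ca->cnt feeds tcp_cong_avoid_ai() as "ACKs per +1 cwnd". E.g.
	 * cwnd = 100 and bic_target = 110 give cnt = 10, so the ~100
	 * ACKs of one RTT grow cwnd by about 10, converging on the
	 * target in roughly one RTT.
	 */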
	/*
	 * The initial growth of the cubic function may be too conservative
	 * when the available bandwidth is still unknown.
	 */
	if (ca->last_max_cwnd == 0 && ca->cnt > 20)
		ca->cnt = 20;	/* increase cwnd 5% per RTT */
tcp_friendliness:
	/* TCP Friendly */
	if (tcp_friendliness) {
		__u32 scale = beta_scale;
		__u32 n;

		/* update tcp cwnd */
		delta = (cwnd * scale) >> 3;
		if (ca->ack_cnt > delta && delta) {
			n = ca->ack_cnt / delta;
			ca->ack_cnt -= n * delta;
			ca->tcp_cwnd += n;
		}

		if (ca->tcp_cwnd > cwnd) {	/* if bic is slower than tcp */
			delta = ca->tcp_cwnd - cwnd;
			max_cnt = cwnd / delta;
			if (ca->cnt > max_cnt)
				ca->cnt = max_cnt;
		}
	}
	/* The maximum rate of cwnd increase CUBIC allows is 1 packet per
	 * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
	 */
	ca->cnt = max(ca->cnt, 2U);
}
/* Or simply use the BPF_STRUCT_OPS to avoid the SEC boilerplate. */
void BPF_STRUCT_OPS(bictcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		if (hystart && after(ack, ca->end_seq))
			bictcp_hystart_reset(sk);
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	bictcp_update(ca, tp->snd_cwnd, acked);
	tcp_cong_avoid_ai(tp, ca->cnt, acked);
}
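/* Note: tcp_slow_start() consumes ACKs only up to ssthresh and returns
 * the remainder, so a burst that crosses ssthresh is split between
 * exponential growth above and cubic growth below in a single call.
 */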
__u32 BPF_STRUCT_OPS(bictcp_recalc_ssthresh, struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	ca->epoch_start = 0;	/* end of epoch */

	/* Wmax and fast convergence */
	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
			/ (2 * BICTCP_BETA_SCALE);
	else
		ca->last_max_cwnd = tp->snd_cwnd;

	return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}
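/* With beta = 717/1024 ~= 0.7, ssthresh drops to ~70% of cwnd on loss.
 * Under fast convergence the remembered maximum is further reduced to
 *	wmax * (1024 + 717) / 2048 ~= 0.85 * wmax
 * to release bandwidth to newer flows sharing the bottleneck.
 */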
void BPF_STRUCT_OPS(bictcp_state, struct sock *sk, __u8 new_state)
{
	if (new_state == TCP_CA_Loss) {
		bictcp_reset(inet_csk_ca(sk));
		bictcp_hystart_reset(sk);
	}
}
#define GSO_MAX_SIZE 65536

/* Account for TSO/GRO delays.
 * Otherwise short RTT flows could get too small ssthresh, since during
 * slow start we begin with small TSO packets and ca->delay_min would
 * not account for long aggregation delay when TSO packets get bigger.
 * Ideally even with a very small RTT we would like to have at least one
 * TSO packet being sent and received by GRO, and another one in qdisc layer.
 * We apply another 100% factor because @rate is doubled at this point.
 * We cap the cushion to 1ms.
 */
static __always_inline __u32 hystart_ack_delay(struct sock *sk)
{
	unsigned long rate;

	rate = sk->sk_pacing_rate;
	if (!rate)
		return 0;
	return min((__u64)USEC_PER_MSEC,
		   div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
}
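/* Example: at a pacing rate of 1e9 bytes/sec the cushion is
 *	65536 * 4 * 1000000 / 1e9 ~= 262 usec,
 * while anything slower than ~2.6e8 bytes/sec hits the 1 ms cap.
 */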
static __always_inline void hystart_update(struct sock *sk, __u32 delay)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);
	__u32 threshold;

	if (hystart_detect & HYSTART_ACK_TRAIN) {
		__u32 now = bictcp_clock_us(sk);

		/* first detection parameter - ack-train detection */
		if ((__s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
			ca->last_ack = now;

			threshold = ca->delay_min + hystart_ack_delay(sk);

			/* Hystart ack train triggers if we get ack past
			 * ca->delay_min/2.
			 * Pacing might have delayed packets up to RTT/2
			 * during slow start.
			 */
			if (sk->sk_pacing_status == SK_PACING_NONE)
				threshold >>= 1;

			if ((__s32)(now - ca->round_start) > threshold) {
				ca->found = 1;
				tp->snd_ssthresh = tp->snd_cwnd;
			}
		}
	}

	if (hystart_detect & HYSTART_DELAY) {
		/* obtain the minimum delay of more than sampling packets */
		if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
			if (ca->curr_rtt > delay)
				ca->curr_rtt = delay;

			ca->sample_cnt++;
		} else {
			if (ca->curr_rtt > ca->delay_min +
			    HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
				ca->found = 1;
				tp->snd_ssthresh = tp->snd_cwnd;
			}
		}
	}
}
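/* Putting numbers on the two exits, assuming delay_min = 10000 usec:
 * HYSTART_DELAY fires once the round's min RTT exceeds
 *	10000 + clamp(10000 >> 3, 4000, 16000) = 14000 usec,
 * and HYSTART_ACK_TRAIN fires once a train of ACKs spaced closer than
 * hystart_ack_delta_us spans more than about one delay_min
 * (halved when the flow is not paced).
 */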
void BPF_STRUCT_OPS(bictcp_acked, struct sock *sk,
		    const struct ack_sample *sample)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);
	__u32 delay;

	/* Some calls are for duplicates without timestamps */
	if (sample->rtt_us < 0)
		return;

	/* Discard delay samples right after fast recovery */
	if (ca->epoch_start && (__s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
		return;

	delay = sample->rtt_us;
	if (delay == 0)
		delay = 1;

	/* first time call or link delay decreases */
	if (ca->delay_min == 0 || ca->delay_min > delay)
		ca->delay_min = delay;

	/* hystart triggers when cwnd is larger than some threshold */
	if (!ca->found && tcp_in_slow_start(tp) && hystart &&
	    tp->snd_cwnd >= hystart_low_window)
		hystart_update(sk, delay);
}
__u32 BPF_STRUCT_OPS(tcp_reno_undo_cwnd, struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd, tp->prior_cwnd);
}
SEC(".struct_ops")
struct tcp_congestion_ops cubic = {
	.init		= (void *)bictcp_init,
	.ssthresh	= (void *)bictcp_recalc_ssthresh,
	.cong_avoid	= (void *)bictcp_cong_avoid,
	.set_state	= (void *)bictcp_state,
	.undo_cwnd	= (void *)tcp_reno_undo_cwnd,
	.cwnd_event	= (void *)bictcp_cwnd_event,
	.pkts_acked	= (void *)bictcp_acked,
	.name		= "bpf_cubic",
};
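/* A minimal userspace loading sketch (object file name assumed, not
 * part of this file): attaching the "cubic" struct_ops map registers
 * "bpf_cubic" as a congestion control selectable per socket:
 *
 *	struct bpf_object *obj = bpf_object__open_file("bpf_cubic.bpf.o", NULL);
 *
 *	bpf_object__load(obj);
 *	bpf_map__attach_struct_ops(bpf_object__find_map_by_name(obj, "cubic"));
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bpf_cubic", 9);
 */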