// SPDX-License-Identifier: GPL-2.0-only

/* WARNING: This implementation is not necessarily the same
 * as the tcp_cubic.c.  The purpose is mainly for testing
 * the kernel BPF logic.
 *
 * Highlights:
 * 1. CONFIG_HZ .kconfig map is used.
 * 2. In bictcp_update(), calculation is changed to use usec
 *    resolution (i.e. USEC_PER_JIFFY) instead of using jiffies.
 *    Thus, usecs_to_jiffies() is not used in the bpf_cubic.c.
 * 3. In bictcp_update() [under tcp_friendliness], the original
 *    "while (ca->ack_cnt > delta)" loop is changed to the equivalent
 *    "ca->ack_cnt / delta" operation.
 */
#include <linux/bpf.h>
#include <linux/stddef.h>
#include <linux/tcp.h>
#include "bpf_tcp_helpers.h"
char _license[] SEC("license") = "GPL";
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
#define BICTCP_BETA_SCALE	1024	/* Scale factor beta calculation
					 * max_cwnd = snd_cwnd * beta
					 */
#define BICTCP_HZ		10	/* BIC HZ 2^10 = 1024 */
/* Two methods of hybrid slow start */
#define HYSTART_ACK_TRAIN	0x1
#define HYSTART_DELAY		0x2

/* Number of delay samples for detecting the increase of delay */
#define HYSTART_MIN_SAMPLES	8
#define HYSTART_DELAY_MIN	(4000U)		/* 4 ms */
#define HYSTART_DELAY_MAX	(16000U)	/* 16 ms */
#define HYSTART_DELAY_THRESH(x)	clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)
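/* For example, HYSTART_DELAY_THRESH(2000U) clamps up to the 4 ms floor
 * (4000) and HYSTART_DELAY_THRESH(20000U) clamps down to the 16 ms
 * ceiling (16000).
 */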
static int fast_convergence = 1;
static const int beta = 717;	/* = 717/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh;
static const int bic_scale = 41;
static int tcp_friendliness = 1;

static int hystart = 1;
static int hystart_detect = HYSTART_ACK_TRAIN | HYSTART_DELAY;
static int hystart_low_window = 16;
static int hystart_ack_delta_us = 2000;
static const __u32 cube_rtt_scale = (bic_scale * 10);	/* 1024*c/rtt */
static const __u32 beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3
				/ (BICTCP_BETA_SCALE - beta);
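/* With the defaults above, beta_scale = 8*(1024+717)/3/(1024-717) = 15;
 * the reno-cwnd estimate in bictcp_update() then advances by one for
 * every (15 * cwnd) >> 3 ACKed packets.
 */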
/* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
 * so K = cubic_root( (wmax-cwnd)*rtt/c )
 * the unit of K is bictcp_HZ=2^10, not HZ
 *
 * c = bic_scale >> 10
 * rtt = 100ms
 *
 * the following code has been designed and tested for
 * cwnd < 1 million packets
 * RTT < 100 seconds
 * HZ < 100,000,000 (corresponding to 10 nano-second resolution)
 */
/* 1/c * 2^2*bictcp_HZ * srtt, 2^40 */
static const __u64 cube_factor = (__u64)(1ull << (10+3*BICTCP_HZ))
				/ (bic_scale * 10);
/* BIC TCP Parameters */
struct bictcp {
	__u32	cnt;		/* increase cwnd by 1 after ACKs */
	__u32	last_max_cwnd;	/* last maximum snd_cwnd */
	__u32	last_cwnd;	/* the last snd_cwnd */
	__u32	last_time;	/* time when updated last_cwnd */
	__u32	bic_origin_point;/* origin point of bic function */
	__u32	bic_K;		/* time to origin point
				 * from the beginning of the current epoch
				 */
	__u32	delay_min;	/* min delay (usec) */
	__u32	epoch_start;	/* beginning of an epoch */
	__u32	ack_cnt;	/* number of acks */
	__u32	tcp_cwnd;	/* estimated tcp cwnd */
	__u8	sample_cnt;	/* number of samples to decide curr_rtt */
	__u8	found;		/* the exit point is found? */
	__u32	round_start;	/* beginning of each round */
	__u32	end_seq;	/* end_seq of the round */
	__u32	last_ack;	/* last time when the ACK spacing is close */
	__u32	curr_rtt;	/* the minimum rtt of current round */
};
static inline void bictcp_reset(struct bictcp *ca)
{
	ca->cnt = 0;
	ca->last_max_cwnd = 0;
	ca->last_cwnd = 0;
	ca->last_time = 0;
	ca->bic_origin_point = 0;
	ca->bic_K = 0;
	ca->delay_min = 0;
	ca->epoch_start = 0;
	ca->ack_cnt = 0;
	ca->tcp_cwnd = 0;
	ca->found = 0;
}
extern unsigned long CONFIG_HZ __kconfig;
#define HZ CONFIG_HZ
#define USEC_PER_MSEC	1000UL
#define USEC_PER_SEC	1000000UL
#define USEC_PER_JIFFY	(USEC_PER_SEC / HZ)
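/* e.g. with CONFIG_HZ=1000 a jiffy is USEC_PER_JIFFY = 1000 usec (1 ms) */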
static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
{
	return dividend / divisor;
}

#define div64_ul div64_u64
#define BITS_PER_U64 (sizeof(__u64) * 8)
static __always_inline int fls64(__u64 x)
{
	int num = BITS_PER_U64 - 1;

	if (!x)
		return 0;

	if (!(x & (~0ull << (BITS_PER_U64-32)))) {
		num -= 32;
		x <<= 32;
	}
	if (!(x & (~0ull << (BITS_PER_U64-16)))) {
		num -= 16;
		x <<= 16;
	}
	if (!(x & (~0ull << (BITS_PER_U64-8)))) {
		num -= 8;
		x <<= 8;
	}
	if (!(x & (~0ull << (BITS_PER_U64-4)))) {
		num -= 4;
		x <<= 4;
	}
	if (!(x & (~0ull << (BITS_PER_U64-2)))) {
		num -= 2;
		x <<= 2;
	}
	if (!(x & (~0ull << (BITS_PER_U64-1))))
		num -= 1;

	return num + 1;
}
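/* fls64() returns the 1-based position of the most significant set bit:
 * fls64(0) == 0, fls64(1) == 1, fls64(1ull << 63) == 64.
 */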
static __always_inline __u32 bictcp_clock_us(const struct sock *sk)
{
	return tcp_sk(sk)->tcp_mstamp;
}
static __always_inline void bictcp_hystart_reset(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	ca->round_start = ca->last_ack = bictcp_clock_us(sk);
	ca->end_seq = tp->snd_nxt;
	ca->curr_rtt = ~0U;
	ca->sample_cnt = 0;
}
172 /* "struct_ops/" prefix is not a requirement
173 * It will be recognized as BPF_PROG_TYPE_STRUCT_OPS
174 * as long as it is used in one of the func ptr
175 * under SEC(".struct_ops").
177 SEC("struct_ops/bictcp_init")
178 void BPF_PROG(bictcp_init
, struct sock
*sk
)
180 struct bictcp
*ca
= inet_csk_ca(sk
);
185 bictcp_hystart_reset(sk
);
187 if (!hystart
&& initial_ssthresh
)
188 tcp_sk(sk
)->snd_ssthresh
= initial_ssthresh
;
/* No prefix in SEC will also work.
 * The remaining tcp-cubic functions have an easier way.
 */
SEC("no-sec-prefix-bictcp_cwnd_event")
void BPF_PROG(bictcp_cwnd_event, struct sock *sk, enum tcp_ca_event event)
{
	if (event == CA_EVENT_TX_START) {
		struct bictcp *ca = inet_csk_ca(sk);
		__u32 now = tcp_jiffies32;
		__s32 delta;

		delta = now - tcp_sk(sk)->lsndtime;

		/* We were application limited (idle) for a while.
		 * Shift epoch_start to keep cwnd growth to cubic curve.
		 */
		if (ca->epoch_start && delta > 0) {
			ca->epoch_start += delta;
			if (after(ca->epoch_start, now))
				ca->epoch_start = now;
		}
		return;
	}
}
/*
 * cbrt(x) MSB values for x MSB values in [0..63].
 * Precomputed then refined by hand - Willy Tarreau
 *
 *   v = cbrt(x << 18) - 1
 *   cbrt(x) = (v[x] + 10) >> 6
 */
static const __u8 v[] = {
	/* 0x00 */    0,   54,   54,   54,  118,  118,  118,  118,
	/* 0x08 */  123,  129,  134,  138,  143,  147,  151,  156,
	/* 0x10 */  157,  161,  164,  168,  170,  173,  176,  179,
	/* 0x18 */  181,  185,  187,  190,  192,  194,  197,  199,
	/* 0x20 */  200,  202,  204,  206,  209,  211,  213,  215,
	/* 0x28 */  217,  219,  221,  222,  224,  225,  227,  229,
	/* 0x30 */  231,  232,  234,  236,  237,  239,  240,  242,
	/* 0x38 */  244,  245,  246,  248,  250,  251,  252,  254,
};
/* calculate the cubic root of x using a table lookup followed by one
 * Newton-Raphson iteration.
 */
static __always_inline __u32 cubic_root(__u64 a)
{
	__u32 x, b, shift;

	if (a < 64) {
		/* a in [0..63] */
		return ((__u32)v[(__u32)a] + 35) >> 6;
	}

	b = fls64(a);
	b = ((b * 84) >> 8) - 1;
	shift = (a >> (b * 3));

	/* it is needed for verifier's bound check on v */
	if (shift >= 64)
		return 0;

	x = ((__u32)(((__u32)v[shift] + 10) << b)) >> 6;

	/*
	 * Newton-Raphson iteration
	 *                         2
	 * x    = ( 2 * x  +  a / x  ) / 3
	 *  k+1          k         k
	 */
	x = (2 * x + (__u32)div64_u64(a, (__u64)x * (__u64)(x - 1)));
	x = ((x * 341) >> 10);

	return x;
}
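/* e.g. cubic_root(1000) comes out at ~10 (10^3 == 1000); the table seed
 * plus a single Newton-Raphson step keeps the error to a fraction of a
 * percent over the operand range used by bictcp_update().
 */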
/*
 * Compute congestion window to use.
 */
static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
					  __u32 acked)
{
	__u32 delta, bic_target, max_cnt;
	__u64 offs, t;
	ca->ack_cnt += acked;	/* count the number of ACKed packets */
	if (ca->last_cwnd == cwnd &&
	    (__s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
		return;

	/* The CUBIC function can update ca->cnt at most once per jiffy.
	 * On all cwnd reduction events, ca->epoch_start is set to 0,
	 * which will force a recalculation of ca->cnt.
	 */
	if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
		goto tcp_friendliness;
	ca->last_cwnd = cwnd;
	ca->last_time = tcp_jiffies32;
	if (ca->epoch_start == 0) {
		ca->epoch_start = tcp_jiffies32;	/* record beginning */
		ca->ack_cnt = acked;			/* start counting */
		ca->tcp_cwnd = cwnd;			/* syn with cubic */

		if (ca->last_max_cwnd <= cwnd) {
			ca->bic_K = 0;
			ca->bic_origin_point = cwnd;
		} else {
			/* Compute new K based on
			 * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ)
			 */
			ca->bic_K = cubic_root(cube_factor
					       * (ca->last_max_cwnd - cwnd));
			ca->bic_origin_point = ca->last_max_cwnd;
		}
	}
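	/* Scale example: with last_max_cwnd = 100 and cwnd = 50,
	 * bic_K = cubic_root(cube_factor * 50) is roughly 5120, i.e.
	 * about 5 seconds in 2^-BICTCP_HZ (1/1024 s) units.
	 */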
	/* cubic function - calc*/
	/* calculate c * time^3 / rtt,
	 *  while considering overflow in calculation of time^3
	 * (so time^3 is done by using 64 bit)
	 * and without the support of division of 64bit numbers
	 * (so all divisions are done by using 32 bit)
	 *  also NOTE the unit of those variables
	 *	  time  = (t - K) / 2^bictcp_HZ
	 *	  c = bic_scale >> 10
	 *	  rtt  = (srtt >> 3) / HZ
	 *
	 * !!! The following code does not have overflow problems,
	 * if the cwnd < 1 million packets !!!
	 */

	t = (__s32)(tcp_jiffies32 - ca->epoch_start) * USEC_PER_JIFFY;
	/* change the unit from usec to bictcp_HZ */
	t <<= BICTCP_HZ;
	t /= USEC_PER_SEC;
	if (t < ca->bic_K)		/* t - K */
		offs = ca->bic_K - t;
	else
		offs = t - ca->bic_K;

	/* c/rtt * (t-K)^3 */
	delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
	if (t < ca->bic_K)		/* below origin*/
		bic_target = ca->bic_origin_point - delta;
	else				/* above origin*/
		bic_target = ca->bic_origin_point + delta;
	/* cubic function - calc bictcp_cnt*/
	if (bic_target > cwnd) {
		ca->cnt = cwnd / (bic_target - cwnd);
	} else {
		ca->cnt = 100 * cwnd;	/* very small increment*/
	}
	/*
	 * The initial growth of cubic function may be too conservative
	 * when the available bandwidth is still unknown.
	 */
	if (ca->last_max_cwnd == 0 && ca->cnt > 20)
		ca->cnt = 20;	/* increase cwnd 5% per RTT */
tcp_friendliness:
	/* TCP Friendly */
	if (tcp_friendliness) {
		__u32 scale = beta_scale;
		__u32 n;

		/* update tcp cwnd */
		delta = (cwnd * scale) >> 3;
		if (ca->ack_cnt > delta && delta) {
			n = ca->ack_cnt / delta;
			ca->ack_cnt -= n * delta;
			ca->tcp_cwnd += n;
		}

		if (ca->tcp_cwnd > cwnd) {	/* if bic is slower than tcp */
			delta = ca->tcp_cwnd - cwnd;
			max_cnt = cwnd / delta;
			if (ca->cnt > max_cnt)
				ca->cnt = max_cnt;
		}
	}
	/* The maximum rate of cwnd increase CUBIC allows is 1 packet per
	 * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
	 */
	ca->cnt = max(ca->cnt, 2U);
}
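/* Worked example: with bic_target = 110 and cwnd = 100 above,
 * ca->cnt = 100 / (110 - 100) = 10, so tcp_cong_avoid_ai() grows cwnd by
 * one segment per 10 ACKs and the cubic curve is reached in about a RTT.
 */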
/* Or simply use the BPF_STRUCT_OPS to avoid the SEC boiler plate. */
void BPF_STRUCT_OPS(bictcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		if (hystart && after(ack, ca->end_seq))
			bictcp_hystart_reset(sk);
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	bictcp_update(ca, tp->snd_cwnd, acked);
	tcp_cong_avoid_ai(tp, ca->cnt, acked);
}
__u32 BPF_STRUCT_OPS(bictcp_recalc_ssthresh, struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	ca->epoch_start = 0;	/* end of epoch */

	/* Wmax and fast convergence */
	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
			/ (2 * BICTCP_BETA_SCALE);
	else
		ca->last_max_cwnd = tp->snd_cwnd;

	return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}
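/* e.g. at snd_cwnd = 100: the new ssthresh is 100*717/1024 = 70, and with
 * fast convergence last_max_cwnd is recorded as 100*(1024+717)/2048 = 85
 * instead of 100, releasing room for competing flows to converge.
 */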
void BPF_STRUCT_OPS(bictcp_state, struct sock *sk, __u8 new_state)
{
	if (new_state == TCP_CA_Loss) {
		bictcp_reset(inet_csk_ca(sk));
		bictcp_hystart_reset(sk);
	}
}
#define GSO_MAX_SIZE		65536

/* Account for TSO/GRO delays.
 * Otherwise short RTT flows could get too small ssthresh, since during
 * slow start we begin with small TSO packets and ca->delay_min would
 * not account for long aggregation delay when TSO packets get bigger.
 * Ideally even with a very small RTT we would like to have at least one
 * TSO packet being sent and received by GRO, and another one in qdisc layer.
 * We apply another 100% factor because @rate is doubled at this point.
 * We cap the cushion to 1ms.
 */
static __always_inline __u32 hystart_ack_delay(struct sock *sk)
{
	__u64 rate;

	rate = sk->sk_pacing_rate;
	if (!rate)
		return 0;
	return min((__u64)USEC_PER_MSEC,
		   div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
}
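/* e.g. at a 10 Gbit/s pacing rate (1.25e9 B/s) the cushion is
 * 65536*4*1000000/1250000000 = 209 usec, while at 1 Gbit/s the raw
 * 2097 usec result is capped to USEC_PER_MSEC (1000).
 */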
static __always_inline void hystart_update(struct sock *sk, __u32 delay)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);
	__u32 threshold;

	if (hystart_detect & HYSTART_ACK_TRAIN) {
		__u32 now = bictcp_clock_us(sk);

		/* first detection parameter - ack-train detection */
		if ((__s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
			ca->last_ack = now;

			threshold = ca->delay_min + hystart_ack_delay(sk);

			/* Hystart ack train triggers if we get ack past
			 * ca->delay_min/2.
			 * Pacing might have delayed packets up to RTT/2
			 * during slow start.
			 */
			if (sk->sk_pacing_status == SK_PACING_NONE)
				threshold >>= 1;

			if ((__s32)(now - ca->round_start) > threshold) {
				ca->found = 1;
				tp->snd_ssthresh = tp->snd_cwnd;
			}
		}
	}

	if (hystart_detect & HYSTART_DELAY) {
		/* obtain the minimum delay of more than sampling packets */
		if (ca->curr_rtt > delay)
			ca->curr_rtt = delay;
		if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
			ca->sample_cnt++;
		} else {
			if (ca->curr_rtt > ca->delay_min +
			    HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
				ca->found = 1;
				tp->snd_ssthresh = tp->snd_cwnd;
			}
		}
	}
}
void BPF_STRUCT_OPS(bictcp_acked, struct sock *sk,
		    const struct ack_sample *sample)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);
	__u32 delay;

	/* Some calls are for duplicates without timestamps */
	if (sample->rtt_us < 0)
		return;

	/* Discard delay samples right after fast recovery */
	if (ca->epoch_start && (__s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
		return;

	delay = sample->rtt_us;
	if (delay == 0)
		delay = 1;

	/* first time call or link delay decreases */
	if (ca->delay_min == 0 || ca->delay_min > delay)
		ca->delay_min = delay;

	/* hystart triggers when cwnd is larger than some threshold */
	if (!ca->found && tcp_in_slow_start(tp) && hystart &&
	    tp->snd_cwnd >= hystart_low_window)
		hystart_update(sk, delay);
}
__u32 BPF_STRUCT_OPS(tcp_reno_undo_cwnd, struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd, tp->prior_cwnd);
}
SEC(".struct_ops")
struct tcp_congestion_ops cubic = {
	.init		= (void *)bictcp_init,
	.ssthresh	= (void *)bictcp_recalc_ssthresh,
	.cong_avoid	= (void *)bictcp_cong_avoid,
	.set_state	= (void *)bictcp_state,
	.undo_cwnd	= (void *)tcp_reno_undo_cwnd,
	.cwnd_event	= (void *)bictcp_cwnd_event,
	.pkts_acked	= (void *)bictcp_acked,
	.name		= "bpf_cubic",
};