freebsd/src.git: sys/netinet/tcp_stacks/rack.c
1 /*-
2 * Copyright (c) 2016-2020 Netflix, Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
27 #include <sys/cdefs.h>
28 #include "opt_inet.h"
29 #include "opt_inet6.h"
30 #include "opt_ipsec.h"
31 #include "opt_ratelimit.h"
32 #include "opt_kern_tls.h"
33 #if defined(INET) || defined(INET6)
34 #include <sys/param.h>
35 #include <sys/arb.h>
36 #include <sys/module.h>
37 #include <sys/kernel.h>
38 #ifdef TCP_HHOOK
39 #include <sys/hhook.h>
40 #endif
41 #include <sys/lock.h>
42 #include <sys/malloc.h>
44 #include <sys/mutex.h>
45 #include <sys/mbuf.h>
46 #include <sys/proc.h> /* for proc0 declaration */
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
49 #include <sys/sysctl.h>
50 #include <sys/systm.h>
51 #ifdef STATS
52 #include <sys/qmath.h>
53 #include <sys/tree.h>
54 #include <sys/stats.h> /* Must come after qmath.h and tree.h */
55 #else
56 #include <sys/tree.h>
57 #endif
58 #include <sys/refcount.h>
59 #include <sys/queue.h>
60 #include <sys/tim_filter.h>
61 #include <sys/smp.h>
62 #include <sys/kthread.h>
63 #include <sys/kern_prefetch.h>
64 #include <sys/protosw.h>
65 #ifdef TCP_ACCOUNTING
66 #include <sys/sched.h>
67 #include <machine/cpu.h>
68 #endif
69 #include <vm/uma.h>
71 #include <net/route.h>
72 #include <net/route/nhop.h>
73 #include <net/vnet.h>
75 #define TCPSTATES /* for logging */
77 #include <netinet/in.h>
78 #include <netinet/in_kdtrace.h>
79 #include <netinet/in_pcb.h>
80 #include <netinet/ip.h>
81 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
82 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
83 #include <netinet/ip_var.h>
84 #include <netinet/ip6.h>
85 #include <netinet6/in6_pcb.h>
86 #include <netinet6/ip6_var.h>
87 #include <netinet/tcp.h>
88 #define TCPOUTFLAGS
89 #include <netinet/tcp_fsm.h>
90 #include <netinet/tcp_seq.h>
91 #include <netinet/tcp_timer.h>
92 #include <netinet/tcp_var.h>
93 #include <netinet/tcp_log_buf.h>
94 #include <netinet/tcp_syncache.h>
95 #include <netinet/tcp_hpts.h>
96 #include <netinet/tcp_ratelimit.h>
97 #include <netinet/tcp_accounting.h>
98 #include <netinet/tcpip.h>
99 #include <netinet/cc/cc.h>
100 #include <netinet/cc/cc_newreno.h>
101 #include <netinet/tcp_fastopen.h>
102 #include <netinet/tcp_lro.h>
103 #ifdef NETFLIX_SHARED_CWND
104 #include <netinet/tcp_shared_cwnd.h>
105 #endif
106 #ifdef TCP_OFFLOAD
107 #include <netinet/tcp_offload.h>
108 #endif
109 #ifdef INET6
110 #include <netinet6/tcp6_var.h>
111 #endif
112 #include <netinet/tcp_ecn.h>
114 #include <netipsec/ipsec_support.h>
116 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
117 #include <netipsec/ipsec.h>
118 #include <netipsec/ipsec6.h>
119 #endif /* IPSEC */
121 #include <netinet/udp.h>
122 #include <netinet/udp_var.h>
123 #include <machine/in_cksum.h>
125 #ifdef MAC
126 #include <security/mac/mac_framework.h>
127 #endif
128 #include "sack_filter.h"
129 #include "tcp_rack.h"
130 #include "tailq_hash.h"
131 #include "rack_bbr_common.h"
133 uma_zone_t rack_zone;
134 uma_zone_t rack_pcb_zone;
136 #ifndef TICKS2SBT
137 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t)))
138 #endif
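/*
 * TICKS2SBT() converts a count of ticks into sbintime_t units
 * (tick_sbt being the sbintime_t length of a single tick).
 */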
140 VNET_DECLARE(uint32_t, newreno_beta);
141 VNET_DECLARE(uint32_t, newreno_beta_ecn);
142 #define V_newreno_beta VNET(newreno_beta)
143 #define V_newreno_beta_ecn VNET(newreno_beta_ecn)
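/*
 * Per-VNET beta/beta_ecn defaults exported by the newreno CC module,
 * brought into scope for this stack's newreno beta handling.
 */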
145 #define M_TCPFSB __CONCAT(M_TCPFSB, STACKNAME)
146 #define M_TCPDO __CONCAT(M_TCPDO, STACKNAME)
148 MALLOC_DEFINE(M_TCPFSB, "tcp_fsb_" __XSTRING(STACKNAME), "TCP fast send block");
149 MALLOC_DEFINE(M_TCPDO, "tcp_do_" __XSTRING(STACKNAME), "TCP deferred options");
150 MALLOC_DEFINE(M_TCPPCM, "tcp_pcm_" __XSTRING(STACKNAME), "TCP PCM measurement information");
152 struct sysctl_ctx_list rack_sysctl_ctx;
153 struct sysctl_oid *rack_sysctl_root;
155 #define CUM_ACKED 1
156 #define SACKED 2
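/*
 * Ack classification values: these are passed as the ack_type argument to
 * rack_update_rtt() to note whether a sendmap entry was acknowledged by a
 * cumulative ack or by a SACK block.
 */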
158 /*
159 * The RACK module incorporates a number of
160 * TCP ideas that have been put out into the IETF
161 * over the last few years:
162 * - Matt Mathis's Rate Halving which slowly drops
163 * the congestion window so that the ack clock can
164 * be maintained during a recovery.
165 * - Yuchung Cheng's RACK TCP (for which it is named) that
166 * will stop us using the number of dup acks and instead
167 * use time as the gauge of when we retransmit.
168 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
169 * of Dukkipati et al.
170 * RACK depends on SACK, so if an endpoint arrives that
171 * cannot do SACK the state machine below will shuttle the
172 * connection back to using the "default" TCP stack that is
173 * in FreeBSD.
175 * To implement RACK the original TCP stack was first decomposed
176 * into a functional state machine with individual states
177 * for each of the possible TCP connection states. The do_segment
178 * function's role in life is to mandate that the connection supports SACK
179 * initially and then assure that the RACK state matches the connection
180 * state before calling the states do_segment function. Each
181 * state is simplified due to the fact that the original do_segment
182 * has been decomposed and we *know* what state we are in (no
183 * switches on the state) and all tests for SACK are gone. This
184 * greatly simplifies what each state does.
186 * TCP output is also over-written with a new version since it
187 * must maintain the new rack scoreboard.
188 */
190 static int32_t rack_tlp_thresh = 1;
191 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */
192 static int32_t rack_tlp_use_greater = 1;
193 static int32_t rack_reorder_thresh = 2;
194 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000
195 * - 60 seconds */
196 static uint32_t rack_pcm_every_n_rounds = 100;
197 static uint32_t rack_pcm_blast = 0;
198 static uint32_t rack_pcm_is_enabled = 1;
199 static uint8_t rack_ssthresh_rest_rto_rec = 0; /* Do we restore ssthresh when we have rec -> rto -> rec */
201 static uint32_t rack_gp_gain_req = 1200; /* Amount percent-wise required to gain to record a round as "gaining" */
202 static uint32_t rack_rnd_cnt_req = 0x10005; /* Default number of rounds if we are below rack_gp_gain_req where we exit ss */
205 static int32_t rack_rxt_scoreboard_clear_thresh = 2;
206 static int32_t rack_dnd_default = 0; /* For rr_conf = 3, what is the default for dnd */
207 static int32_t rack_rxt_controls = 0;
208 static int32_t rack_fill_cw_state = 0;
209 static uint8_t rack_req_measurements = 1;
210 /* Attack threshold detections */
211 static uint32_t rack_highest_sack_thresh_seen = 0;
212 static uint32_t rack_highest_move_thresh_seen = 0;
213 static uint32_t rack_merge_out_sacks_on_attack = 0;
214 static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */
215 static int32_t rack_hw_pace_extra_slots = 0; /* 2 extra MSS time betweens */
216 static int32_t rack_hw_rate_caps = 0; /* 1; */
217 static int32_t rack_hw_rate_cap_per = 0; /* 0 -- off */
218 static int32_t rack_hw_rate_min = 0; /* 1500000;*/
219 static int32_t rack_hw_rate_to_low = 0; /* 1200000; */
220 static int32_t rack_hw_up_only = 0;
221 static int32_t rack_stats_gets_ms_rtt = 1;
222 static int32_t rack_prr_addbackmax = 2;
223 static int32_t rack_do_hystart = 0;
224 static int32_t rack_apply_rtt_with_reduced_conf = 0;
225 static int32_t rack_hibeta_setting = 0;
226 static int32_t rack_default_pacing_divisor = 250;
227 static uint16_t rack_pacing_min_seg = 0;
228 static int32_t rack_timely_off = 0;
230 static uint32_t sad_seg_size_per = 800; /* 80.0 % */
231 static int32_t rack_pkt_delay = 1000;
232 static int32_t rack_send_a_lot_in_prr = 1;
233 static int32_t rack_min_to = 1000; /* Number of microsecond min timeout */
234 static int32_t rack_verbose_logging = 0;
235 static int32_t rack_ignore_data_after_close = 1;
236 static int32_t rack_enable_shared_cwnd = 1;
237 static int32_t rack_use_cmp_acks = 1;
238 static int32_t rack_use_fsb = 1;
239 static int32_t rack_use_rfo = 1;
240 static int32_t rack_use_rsm_rfo = 1;
241 static int32_t rack_max_abc_post_recovery = 2;
242 static int32_t rack_client_low_buf = 0;
243 static int32_t rack_dsack_std_based = 0x3; /* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
244 static int32_t rack_bw_multipler = 0; /* Limit on fill cw's jump up to be this x gp_est */
245 #ifdef TCP_ACCOUNTING
246 static int32_t rack_tcp_accounting = 0;
247 #endif
248 static int32_t rack_limits_scwnd = 1;
249 static int32_t rack_enable_mqueue_for_nonpaced = 0;
250 static int32_t rack_hybrid_allow_set_maxseg = 0;
251 static int32_t rack_disable_prr = 0;
252 static int32_t use_rack_rr = 1;
253 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */
254 static int32_t rack_persist_min = 250000; /* 250usec */
255 static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */
256 static int32_t rack_honors_hpts_min_to = 1; /* Do we honor the hpts minimum time out for pacing timers */
257 static uint32_t rack_max_reduce = 10; /* Percent we can reduce slot by */
258 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */
259 static int32_t rack_limit_time_with_srtt = 0;
260 static int32_t rack_autosndbuf_inc = 20; /* In percentage form */
261 static int32_t rack_enobuf_hw_boost_mult = 0; /* How many times the hw rate we boost slot using time_between */
262 static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */
263 static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */
264 static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */
265 static int32_t rack_hw_check_queue = 0; /* Do we always pre-check queue depth of a hw queue */
266 static int32_t rack_full_buffer_discount = 10;
267 /*
268 * Currently regular tcp has a rto_min of 30ms
269 * the backoff goes 12 times so that ends up
270 * being a total of 122.850 seconds (30 ms * (2^12 - 1)) before a
271 * connection is killed.
272 */
273 static uint32_t rack_def_data_window = 20;
274 static uint32_t rack_goal_bdp = 2;
275 static uint32_t rack_min_srtts = 1;
276 static uint32_t rack_min_measure_usec = 0;
277 static int32_t rack_tlp_min = 10000; /* 10ms */
278 static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */
279 static int32_t rack_rto_max = 4000000; /* 4 seconds in usec's */
280 static const int32_t rack_free_cache = 2;
281 static int32_t rack_hptsi_segments = 40;
282 static int32_t rack_rate_sample_method = USE_RTT_LOW;
283 static int32_t rack_pace_every_seg = 0;
284 static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */
285 static int32_t rack_slot_reduction = 4;
286 static int32_t rack_wma_divisor = 8; /* For WMA calculation */
287 static int32_t rack_cwnd_block_ends_measure = 0;
288 static int32_t rack_rwnd_block_ends_measure = 0;
289 static int32_t rack_def_profile = 0;
291 static int32_t rack_lower_cwnd_at_tlp = 0;
292 static int32_t rack_always_send_oldest = 0;
293 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;
295 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */
296 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */
297 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */
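/*
 * The per-of-gp values above are percentages applied to the goodput
 * estimate when deriving a pacing rate, e.g. the slow-start default of 250
 * paces at roughly 2.5x the measured goodput (see the pacing sysctls below).
 */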
299 /* Probertt */
300 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */
301 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */
302 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */
303 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */
304 static uint16_t rack_atexit_prtt = 130; /* Clamp to 100% on exit prtt if non highly buffered path */
306 static uint32_t rack_max_drain_wait = 2; /* How many gp srtt's before we give up draining */
307 static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */
308 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */
309 static uint32_t rack_probertt_use_min_rtt_exit = 0;
310 static uint32_t rack_probe_rtt_sets_cwnd = 0;
311 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */
312 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */
313 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top fraction */
314 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */
315 static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */
316 static uint32_t rack_probertt_filter_life = 10000000;
317 static uint32_t rack_probertt_lower_within = 10;
318 static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */
319 static int32_t rack_pace_one_seg = 0; /* Shall we pace for less than 1.4Meg 1MSS at a time */
320 static int32_t rack_probertt_clear_is = 1;
321 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */
322 static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decide a hbp */
324 /* Part of pacing */
325 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */
327 /* Timely information:
329 * Here we have various control parameters on how
330 * timely may change the multiplier. rack_gain_p5_ub
331 * is associated with timely but not directly influencing
332 * the rate decision like the other variables. It controls
333 * the way fill-cw interacts with timely and caps how much
334 * timely can boost the fill-cw b/w.
336 * The other values are various boost/shrink numbers as well
337 * as potential caps when adjustments are made to the timely
338 * gain (returned by rack_get_output_gain()). Remember too that
339 * the gain returned can be overridden by other factors such as
340 * probeRTT as well as fixed-rate-pacing.
341 */
342 static int32_t rack_gain_p5_ub = 250;
343 static int32_t rack_gp_per_bw_mul_up = 2; /* 2% */
344 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */
345 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */
346 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */
347 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */
348 static int32_t rack_gp_decrease_per = 80; /* Beta value of timely decrease (.8) = 80 */
349 static int32_t rack_gp_increase_per = 2; /* 2% increase in multiplier */
350 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */
351 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */
352 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */
353 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */
354 static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing it's multiplier */
355 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? */
356 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */
357 static int32_t rack_timely_max_push_drop = 3; /* Three rounds of pushing */
358 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */
359 static int32_t rack_use_max_for_nobackoff = 0;
360 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */
361 static int32_t rack_timely_no_stopping = 0;
362 static int32_t rack_down_raise_thresh = 100;
363 static int32_t rack_req_segs = 1;
364 static uint64_t rack_bw_rate_cap = 0;
365 static uint64_t rack_fillcw_bw_cap = 3750000; /* Cap fillcw at 30Mbps */
368 /* Rack specific counters */
369 counter_u64_t rack_saw_enobuf;
370 counter_u64_t rack_saw_enobuf_hw;
371 counter_u64_t rack_saw_enetunreach;
372 counter_u64_t rack_persists_sends;
373 counter_u64_t rack_persists_acks;
374 counter_u64_t rack_persists_loss;
375 counter_u64_t rack_persists_lost_ends;
376 counter_u64_t rack_total_bytes;
377 #ifdef INVARIANTS
378 counter_u64_t rack_adjust_map_bw;
379 #endif
380 /* Tail loss probe counters */
381 counter_u64_t rack_tlp_tot;
382 counter_u64_t rack_tlp_newdata;
383 counter_u64_t rack_tlp_retran;
384 counter_u64_t rack_tlp_retran_bytes;
385 counter_u64_t rack_to_tot;
386 counter_u64_t rack_hot_alloc;
387 counter_u64_t rack_to_alloc;
388 counter_u64_t rack_to_alloc_hard;
389 counter_u64_t rack_to_alloc_emerg;
390 counter_u64_t rack_to_alloc_limited;
391 counter_u64_t rack_alloc_limited_conns;
392 counter_u64_t rack_split_limited;
393 counter_u64_t rack_rxt_clamps_cwnd;
394 counter_u64_t rack_rxt_clamps_cwnd_uniq;
396 counter_u64_t rack_multi_single_eq;
397 counter_u64_t rack_proc_non_comp_ack;
399 counter_u64_t rack_fto_send;
400 counter_u64_t rack_fto_rsm_send;
401 counter_u64_t rack_nfto_resend;
402 counter_u64_t rack_non_fto_send;
403 counter_u64_t rack_extended_rfo;
405 counter_u64_t rack_sack_proc_all;
406 counter_u64_t rack_sack_proc_short;
407 counter_u64_t rack_sack_proc_restart;
408 counter_u64_t rack_sack_attacks_detected;
409 counter_u64_t rack_sack_attacks_reversed;
410 counter_u64_t rack_sack_attacks_suspect;
411 counter_u64_t rack_sack_used_next_merge;
412 counter_u64_t rack_sack_splits;
413 counter_u64_t rack_sack_used_prev_merge;
414 counter_u64_t rack_sack_skipped_acked;
415 counter_u64_t rack_ack_total;
416 counter_u64_t rack_express_sack;
417 counter_u64_t rack_sack_total;
418 counter_u64_t rack_move_none;
419 counter_u64_t rack_move_some;
421 counter_u64_t rack_input_idle_reduces;
422 counter_u64_t rack_collapsed_win;
423 counter_u64_t rack_collapsed_win_seen;
424 counter_u64_t rack_collapsed_win_rxt;
425 counter_u64_t rack_collapsed_win_rxt_bytes;
426 counter_u64_t rack_try_scwnd;
427 counter_u64_t rack_hw_pace_init_fail;
428 counter_u64_t rack_hw_pace_lost;
430 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
431 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];
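/*
 * RACK_REXMTVAL() yields the retransmit timeout estimate, srtt + 4 * rttvar,
 * floored at rack_rto_min; RACK_TCPT_RANGESET() then clamps (value + slop)
 * into the [tvmin, tvmax] range.
 */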
434 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))
436 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \
437 (tv) = (value) + slop; \
438 if ((u_long)(tv) < (u_long)(tvmin)) \
439 (tv) = (tvmin); \
440 if ((u_long)(tv) > (u_long)(tvmax)) \
441 (tv) = (tvmax); \
442 } while (0)
444 static void
445 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);
447 static int
448 rack_process_ack(struct mbuf *m, struct tcphdr *th,
449 struct socket *so, struct tcpcb *tp, struct tcpopt *to,
450 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val, int32_t orig_tlen);
451 static int
452 rack_process_data(struct mbuf *m, struct tcphdr *th,
453 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
454 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
455 static void
456 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
457 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery);
458 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
459 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
460 uint8_t limit_type);
461 static struct rack_sendmap *
462 rack_check_recovery_mode(struct tcpcb *tp,
463 uint32_t tsused);
464 static uint32_t
465 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack);
466 static void
467 rack_cong_signal(struct tcpcb *tp,
468 uint32_t type, uint32_t ack, int );
469 static void rack_counter_destroy(void);
470 static int
471 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt);
472 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
473 static void
474 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override);
475 static void
476 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
477 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos);
478 static void rack_dtor(void *mem, int32_t size, void *arg);
479 static void
480 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
481 uint32_t flex1, uint32_t flex2,
482 uint32_t flex3, uint32_t flex4,
483 uint32_t flex5, uint32_t flex6,
484 uint16_t flex7, uint8_t mod);
486 static void
487 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
488 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line,
489 struct rack_sendmap *rsm, uint8_t quality);
490 static struct rack_sendmap *
491 rack_find_high_nonack(struct tcp_rack *rack,
492 struct rack_sendmap *rsm);
493 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
494 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
495 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
496 static int rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt);
497 static void
498 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
499 tcp_seq th_ack, int line, uint8_t quality);
500 static void
501 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm);
503 static uint32_t
504 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
505 static int32_t rack_handoff_ok(struct tcpcb *tp);
506 static int32_t rack_init(struct tcpcb *tp, void **ptr);
507 static void rack_init_sysctls(void);
509 static void
510 rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
511 struct tcphdr *th, int entered_rec, int dup_ack_struck,
512 int *dsack_seen, int *sacks_seen);
513 static void
514 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
515 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts,
516 struct rack_sendmap *hintrsm, uint32_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls, int segsiz);
518 static uint64_t rack_get_gp_est(struct tcp_rack *rack);
521 static void
522 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
523 struct rack_sendmap *rsm, uint32_t cts);
524 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
525 static int32_t rack_output(struct tcpcb *tp);
527 static uint32_t
528 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
529 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
530 uint32_t cts, uint32_t segsiz);
531 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq);
532 static void rack_remxt_tmr(struct tcpcb *tp);
533 static int rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt);
534 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
535 static int32_t rack_stopall(struct tcpcb *tp);
536 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
537 static uint32_t
538 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
539 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint32_t add_flag, int segsiz);
540 static void
541 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
542 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz);
543 static int
544 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
545 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
546 static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
547 static int
548 rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
549 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
550 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
552 static int
553 rack_do_closing(struct mbuf *m, struct tcphdr *th,
554 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
555 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
556 static int
557 rack_do_established(struct mbuf *m, struct tcphdr *th,
558 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
559 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
560 static int
561 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
562 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
563 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
564 static int
565 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
566 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
567 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
568 static int
569 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
570 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
571 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
572 static int
573 rack_do_lastack(struct mbuf *m, struct tcphdr *th,
574 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
575 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
576 static int
577 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
578 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
579 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
580 static int
581 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
582 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
583 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
584 static void rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts);
585 struct rack_sendmap *
586 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
587 uint32_t tsused);
588 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
589 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
590 static void
591 tcp_rack_partialack(struct tcpcb *tp);
592 static int
593 rack_set_profile(struct tcp_rack *rack, int prof);
594 static void
595 rack_apply_deferred_options(struct tcp_rack *rack);
597 int32_t rack_clear_counter=0;
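/*
 * rack_get_lt_bw() returns the long term (lifetime) bandwidth estimate in
 * bytes per second: accumulated bytes over accumulated microseconds, folding
 * in the currently open measurement interval when lt_bw_up is set.
 */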
599 static uint64_t
600 rack_get_lt_bw(struct tcp_rack *rack)
601 {
602 struct timeval tv;
603 uint64_t tim, bytes;
605 tim = rack->r_ctl.lt_bw_time;
606 bytes = rack->r_ctl.lt_bw_bytes;
607 if (rack->lt_bw_up) {
608 /* Include all the current bytes too */
609 microuptime(&tv);
610 bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq);
611 tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark);
612 }
613 if ((bytes != 0) && (tim != 0))
614 return ((bytes * (uint64_t)1000000) / tim);
615 else
616 return (0);
617 }
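/*
 * rack_swap_beta_values() exchanges the newreno beta/beta_ecn values the
 * connection is currently using with the pair saved in
 * rack->r_ctl.rc_saved_beta, via the CC module's ctl_output interface.
 * It is called with flex8 = 3 when pacing values are applied and flex8 = 4
 * when the originals are restored (flex8 only tags the BB log record).
 */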
619 static void
620 rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8)
621 {
622 struct sockopt sopt;
623 struct cc_newreno_opts opt;
624 struct newreno old;
625 struct tcpcb *tp;
626 int error, failed = 0;
628 tp = rack->rc_tp;
629 if (tp->t_cc == NULL) {
630 /* Tcb is leaving */
631 return;
632 }
633 rack->rc_pacing_cc_set = 1;
634 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
635 /* Not new-reno we can't play games with beta! */
636 failed = 1;
637 goto out;
638 }
640 if (CC_ALGO(tp)->ctl_output == NULL) {
641 /* Huh, not using new-reno so no swaps.? */
642 failed = 2;
643 goto out;
644 }
645 old.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED;
646 /* Get the current values out */
647 sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
648 sopt.sopt_dir = SOPT_GET;
649 opt.name = CC_NEWRENO_BETA;
650 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
651 if (error) {
652 failed = 3;
653 goto out;
654 }
655 old.beta = opt.val;
656 opt.name = CC_NEWRENO_BETA_ECN;
657 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
658 if (error) {
659 failed = 4;
660 goto out;
661 }
662 old.beta_ecn = opt.val;
664 /* Now let's set in the values we have stored */
665 sopt.sopt_dir = SOPT_SET;
666 opt.name = CC_NEWRENO_BETA;
667 opt.val = rack->r_ctl.rc_saved_beta.beta;
668 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
669 if (error) {
670 failed = 5;
671 goto out;
672 }
673 opt.name = CC_NEWRENO_BETA_ECN;
674 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
675 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
676 if (error) {
677 failed = 6;
678 goto out;
679 }
680 /* Save off the values for restoral */
681 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
682 out:
683 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
684 union tcp_log_stackspecific log;
685 struct timeval tv;
686 struct newreno *ptr;
688 ptr = ((struct newreno *)tp->t_ccv.cc_data);
689 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
690 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
691 log.u_bbr.flex1 = ptr->beta;
692 log.u_bbr.flex2 = ptr->beta_ecn;
693 log.u_bbr.flex3 = ptr->newreno_flags;
694 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
695 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
696 log.u_bbr.flex6 = failed;
697 log.u_bbr.flex7 = rack->gp_ready;
698 log.u_bbr.flex7 <<= 1;
699 log.u_bbr.flex7 |= rack->use_fixed_rate;
700 log.u_bbr.flex7 <<= 1;
701 log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
702 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
703 log.u_bbr.flex8 = flex8;
704 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
705 0, &log, false, NULL, NULL, 0, &tv);
706 }
707 }
709 static void
710 rack_set_cc_pacing(struct tcp_rack *rack)
711 {
712 if (rack->rc_pacing_cc_set)
713 return;
714 /*
715 * Use the swap utility placing in 3 for flex8 to id a
716 * new set of values.
717 */
718 rack->rc_pacing_cc_set = 1;
719 rack_swap_beta_values(rack, 3);
720 }
722 static void
723 rack_undo_cc_pacing(struct tcp_rack *rack)
724 {
725 if (rack->rc_pacing_cc_set == 0)
726 return;
727 /*
728 * Use the swap utility placing in 4 for flex8 to id a
729 * restoral of the old values.
730 */
731 rack->rc_pacing_cc_set = 0;
732 rack_swap_beta_values(rack, 4);
733 }
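/*
 * rack_remove_pacing() tears down pacing state: it restores the original
 * beta values if they were swapped, drops the paced-connection and DGP
 * counts this connection contributed, and clears the pacing mode flags.
 */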
735 static void
736 rack_remove_pacing(struct tcp_rack *rack)
737 {
738 if (rack->rc_pacing_cc_set)
739 rack_undo_cc_pacing(rack);
740 if (rack->r_ctl.pacing_method & RACK_REG_PACING)
741 tcp_decrement_paced_conn();
742 if (rack->r_ctl.pacing_method & RACK_DGP_PACING)
743 tcp_dec_dgp_pacing_cnt();
744 rack->rc_always_pace = 0;
745 rack->r_ctl.pacing_method = RACK_PACING_NONE;
746 rack->dgp_on = 0;
747 rack->rc_hybrid_mode = 0;
748 rack->use_fixed_rate = 0;
749 }
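/*
 * rack_log_gpset() emits a BBR_LOG_HPTSI_CALC record describing the current
 * goodput measurement window (sequence, ack and send end points) when both
 * BB logging and verbose logging are enabled.
 */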
751 static void
752 rack_log_gpset(struct tcp_rack *rack, uint32_t seq_end, uint32_t ack_end_t,
753 uint32_t send_end_t, int line, uint8_t mode, struct rack_sendmap *rsm)
754 {
755 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) {
756 union tcp_log_stackspecific log;
757 struct timeval tv;
759 memset(&log, 0, sizeof(log));
760 log.u_bbr.flex1 = seq_end;
761 log.u_bbr.flex2 = rack->rc_tp->gput_seq;
762 log.u_bbr.flex3 = ack_end_t;
763 log.u_bbr.flex4 = rack->rc_tp->gput_ts;
764 log.u_bbr.flex5 = send_end_t;
765 log.u_bbr.flex6 = rack->rc_tp->gput_ack;
766 log.u_bbr.flex7 = mode;
767 log.u_bbr.flex8 = 69;
768 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts;
769 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts;
770 log.u_bbr.pkts_out = line;
771 log.u_bbr.cwnd_gain = rack->app_limited_needs_set;
772 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt;
773 log.u_bbr.epoch = rack->r_ctl.current_round;
774 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost;
775 if (rsm != NULL) {
776 log.u_bbr.applimited = rsm->r_start;
777 log.u_bbr.delivered = rsm->r_end;
778 log.u_bbr.epoch = rsm->r_flags;
779 }
780 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
781 TCP_LOG_EVENTP(rack->rc_tp, NULL,
782 &rack->rc_inp->inp_socket->so_rcv,
783 &rack->rc_inp->inp_socket->so_snd,
784 BBR_LOG_HPTSI_CALC, 0,
785 0, &log, false, &tv);
786 }
787 }
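/*
 * Sysctl handler for clearing stats: writing 1 zeroes the RACK counters
 * above, 2 zeroes the option-usage array, 4 zeroes the out-size array and
 * 3 only prints an informational message.
 */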
789 static int
790 sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
791 {
792 uint32_t stat;
793 int32_t error;
795 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
796 if (error || req->newptr == NULL)
797 return error;
799 error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
800 if (error)
801 return (error);
802 if (stat == 1) {
803 #ifdef INVARIANTS
804 printf("Clearing RACK counters\n");
805 #endif
806 counter_u64_zero(rack_tlp_tot);
807 counter_u64_zero(rack_tlp_newdata);
808 counter_u64_zero(rack_tlp_retran);
809 counter_u64_zero(rack_tlp_retran_bytes);
810 counter_u64_zero(rack_to_tot);
811 counter_u64_zero(rack_saw_enobuf);
812 counter_u64_zero(rack_saw_enobuf_hw);
813 counter_u64_zero(rack_saw_enetunreach);
814 counter_u64_zero(rack_persists_sends);
815 counter_u64_zero(rack_total_bytes);
816 counter_u64_zero(rack_persists_acks);
817 counter_u64_zero(rack_persists_loss);
818 counter_u64_zero(rack_persists_lost_ends);
819 #ifdef INVARIANTS
820 counter_u64_zero(rack_adjust_map_bw);
821 #endif
822 counter_u64_zero(rack_to_alloc_hard);
823 counter_u64_zero(rack_to_alloc_emerg);
824 counter_u64_zero(rack_sack_proc_all);
825 counter_u64_zero(rack_fto_send);
826 counter_u64_zero(rack_fto_rsm_send);
827 counter_u64_zero(rack_extended_rfo);
828 counter_u64_zero(rack_hw_pace_init_fail);
829 counter_u64_zero(rack_hw_pace_lost);
830 counter_u64_zero(rack_non_fto_send);
831 counter_u64_zero(rack_nfto_resend);
832 counter_u64_zero(rack_sack_proc_short);
833 counter_u64_zero(rack_sack_proc_restart);
834 counter_u64_zero(rack_to_alloc);
835 counter_u64_zero(rack_to_alloc_limited);
836 counter_u64_zero(rack_alloc_limited_conns);
837 counter_u64_zero(rack_split_limited);
838 counter_u64_zero(rack_rxt_clamps_cwnd);
839 counter_u64_zero(rack_rxt_clamps_cwnd_uniq);
840 counter_u64_zero(rack_multi_single_eq);
841 counter_u64_zero(rack_proc_non_comp_ack);
842 counter_u64_zero(rack_sack_attacks_detected);
843 counter_u64_zero(rack_sack_attacks_reversed);
844 counter_u64_zero(rack_sack_attacks_suspect);
845 counter_u64_zero(rack_sack_used_next_merge);
846 counter_u64_zero(rack_sack_used_prev_merge);
847 counter_u64_zero(rack_sack_splits);
848 counter_u64_zero(rack_sack_skipped_acked);
849 counter_u64_zero(rack_ack_total);
850 counter_u64_zero(rack_express_sack);
851 counter_u64_zero(rack_sack_total);
852 counter_u64_zero(rack_move_none);
853 counter_u64_zero(rack_move_some);
854 counter_u64_zero(rack_try_scwnd);
855 counter_u64_zero(rack_collapsed_win);
856 counter_u64_zero(rack_collapsed_win_rxt);
857 counter_u64_zero(rack_collapsed_win_seen);
858 counter_u64_zero(rack_collapsed_win_rxt_bytes);
859 } else if (stat == 2) {
860 #ifdef INVARIANTS
861 printf("Clearing RACK option array\n");
862 #endif
863 COUNTER_ARRAY_ZERO(rack_opts_arry, RACK_OPTS_SIZE);
864 } else if (stat == 3) {
865 printf("Rack has no stats counters to clear (use 1 to clear all stats in sysctl node)\n");
866 } else if (stat == 4) {
867 #ifdef INVARIANTS
868 printf("Clearing RACK out size array\n");
869 #endif
870 COUNTER_ARRAY_ZERO(rack_out_size, TCP_MSS_ACCT_SIZE);
871 }
872 rack_clear_counter = 0;
873 return (0);
874 }
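/*
 * rack_init_sysctls() builds the stack's sysctl tree under rack_sysctl_root,
 * creating per-topic nodes (sack_attack, stats, probertt, pacing,
 * hdwr_pacing, timely, timers, tlp, ...) that expose the tunables declared
 * above.
 */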
876 static void
877 rack_init_sysctls(void)
878 {
879 struct sysctl_oid *rack_counters;
880 struct sysctl_oid *rack_attack;
881 struct sysctl_oid *rack_pacing;
882 struct sysctl_oid *rack_timely;
883 struct sysctl_oid *rack_timers;
884 struct sysctl_oid *rack_tlp;
885 struct sysctl_oid *rack_misc;
886 struct sysctl_oid *rack_features;
887 struct sysctl_oid *rack_measure;
888 struct sysctl_oid *rack_probertt;
889 struct sysctl_oid *rack_hw_pacing;
891 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
892 SYSCTL_CHILDREN(rack_sysctl_root),
893 OID_AUTO,
894 "sack_attack",
895 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
896 "Rack Sack Attack Counters and Controls");
897 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
898 SYSCTL_CHILDREN(rack_sysctl_root),
899 OID_AUTO,
900 "stats",
901 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
902 "Rack Counters");
903 SYSCTL_ADD_S32(&rack_sysctl_ctx,
904 SYSCTL_CHILDREN(rack_sysctl_root),
905 OID_AUTO, "rate_sample_method", CTLFLAG_RW,
906 &rack_rate_sample_method , USE_RTT_LOW,
907 "What method should we use for rate sampling 0=high, 1=low ");
908 /* Probe rtt related controls */
909 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
910 SYSCTL_CHILDREN(rack_sysctl_root),
911 OID_AUTO,
912 "probertt",
913 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
914 "ProbeRTT related Controls");
915 SYSCTL_ADD_U16(&rack_sysctl_ctx,
916 SYSCTL_CHILDREN(rack_probertt),
917 OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
918 &rack_atexit_prtt_hbp, 130,
919 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%");
920 SYSCTL_ADD_U16(&rack_sysctl_ctx,
921 SYSCTL_CHILDREN(rack_probertt),
922 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
923 &rack_atexit_prtt, 130,
924 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%");
925 SYSCTL_ADD_U16(&rack_sysctl_ctx,
926 SYSCTL_CHILDREN(rack_probertt),
927 OID_AUTO, "gp_per_mul", CTLFLAG_RW,
928 &rack_per_of_gp_probertt, 60,
929 "What percentage of goodput do we pace at in probertt");
930 SYSCTL_ADD_U16(&rack_sysctl_ctx,
931 SYSCTL_CHILDREN(rack_probertt),
932 OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
933 &rack_per_of_gp_probertt_reduce, 10,
934 "What percentage of goodput do we reduce every gp_srtt");
935 SYSCTL_ADD_U16(&rack_sysctl_ctx,
936 SYSCTL_CHILDREN(rack_probertt),
937 OID_AUTO, "gp_per_low", CTLFLAG_RW,
938 &rack_per_of_gp_lowthresh, 40,
939 "What percentage of goodput do we allow the multiplier to fall to");
940 SYSCTL_ADD_U32(&rack_sysctl_ctx,
941 SYSCTL_CHILDREN(rack_probertt),
942 OID_AUTO, "time_between", CTLFLAG_RW,
943 &rack_time_between_probertt, 96000000,
944 "How many useconds between the lowest rtt falling must pass before we enter probertt");
945 SYSCTL_ADD_U32(&rack_sysctl_ctx,
946 SYSCTL_CHILDREN(rack_probertt),
947 OID_AUTO, "safety", CTLFLAG_RW,
948 &rack_probe_rtt_safety_val, 2000000,
949 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
950 SYSCTL_ADD_U32(&rack_sysctl_ctx,
951 SYSCTL_CHILDREN(rack_probertt),
952 OID_AUTO, "sets_cwnd", CTLFLAG_RW,
953 &rack_probe_rtt_sets_cwnd, 0,
954 "Do we set the cwnd too (if always_lower is on)");
955 SYSCTL_ADD_U32(&rack_sysctl_ctx,
956 SYSCTL_CHILDREN(rack_probertt),
957 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
958 &rack_max_drain_wait, 2,
959 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
960 SYSCTL_ADD_U32(&rack_sysctl_ctx,
961 SYSCTL_CHILDREN(rack_probertt),
962 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
963 &rack_must_drain, 1,
964 "We must drain this many gp_srtt's waiting for flight to reach goal");
965 SYSCTL_ADD_U32(&rack_sysctl_ctx,
966 SYSCTL_CHILDREN(rack_probertt),
967 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
968 &rack_probertt_use_min_rtt_entry, 1,
969 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
970 SYSCTL_ADD_U32(&rack_sysctl_ctx,
971 SYSCTL_CHILDREN(rack_probertt),
972 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW,
973 &rack_probertt_use_min_rtt_exit, 0,
974 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt");
975 SYSCTL_ADD_U32(&rack_sysctl_ctx,
976 SYSCTL_CHILDREN(rack_probertt),
977 OID_AUTO, "length_div", CTLFLAG_RW,
978 &rack_probertt_gpsrtt_cnt_div, 0,
979 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)");
980 SYSCTL_ADD_U32(&rack_sysctl_ctx,
981 SYSCTL_CHILDREN(rack_probertt),
982 OID_AUTO, "length_mul", CTLFLAG_RW,
983 &rack_probertt_gpsrtt_cnt_mul, 0,
984 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)");
985 SYSCTL_ADD_U32(&rack_sysctl_ctx,
986 SYSCTL_CHILDREN(rack_probertt),
987 OID_AUTO, "holdtim_at_target", CTLFLAG_RW,
988 &rack_min_probertt_hold, 200000,
989 "What is the minimum time we hold probertt at target");
990 SYSCTL_ADD_U32(&rack_sysctl_ctx,
991 SYSCTL_CHILDREN(rack_probertt),
992 OID_AUTO, "filter_life", CTLFLAG_RW,
993 &rack_probertt_filter_life, 10000000,
994 "What is the time for the filters life in useconds");
995 SYSCTL_ADD_U32(&rack_sysctl_ctx,
996 SYSCTL_CHILDREN(rack_probertt),
997 OID_AUTO, "lower_within", CTLFLAG_RW,
998 &rack_probertt_lower_within, 10,
999 "If the rtt goes lower within this percentage of the time, go into probe-rtt");
1000 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1001 SYSCTL_CHILDREN(rack_probertt),
1002 OID_AUTO, "must_move", CTLFLAG_RW,
1003 &rack_min_rtt_movement, 250,
1004 "How much is the minimum movement in rtt to count as a drop for probertt purposes");
1005 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1006 SYSCTL_CHILDREN(rack_probertt),
1007 OID_AUTO, "clear_is_cnts", CTLFLAG_RW,
1008 &rack_probertt_clear_is, 1,
1009 "Do we clear I/S counts on exiting probe-rtt");
1010 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1011 SYSCTL_CHILDREN(rack_probertt),
1012 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW,
1013 &rack_max_drain_hbp, 1,
1014 "How many extra drain gpsrtt's do we get in highly buffered paths");
1015 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1016 SYSCTL_CHILDREN(rack_probertt),
1017 OID_AUTO, "hbp_threshold", CTLFLAG_RW,
1018 &rack_hbp_thresh, 3,
1019 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold");
1020 /* Pacing related sysctls */
1021 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1022 SYSCTL_CHILDREN(rack_sysctl_root),
1023 OID_AUTO,
1024 "pacing",
1025 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1026 "Pacing related Controls");
1027 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1028 SYSCTL_CHILDREN(rack_pacing),
1029 OID_AUTO, "pcm_enabled", CTLFLAG_RW,
1030 &rack_pcm_is_enabled, 1,
1031 "Do we by default do PCM measurements?");
1032 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1033 SYSCTL_CHILDREN(rack_pacing),
1034 OID_AUTO, "pcm_rnds", CTLFLAG_RW,
1035 &rack_pcm_every_n_rounds, 100,
1036 "How many rounds before we need to do a PCM measurement");
1037 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1038 SYSCTL_CHILDREN(rack_pacing),
1039 OID_AUTO, "pcm_blast", CTLFLAG_RW,
1040 &rack_pcm_blast, 0,
1041 "Blast out the full cwnd/rwnd when doing a PCM measurement");
1042 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1043 SYSCTL_CHILDREN(rack_pacing),
1044 OID_AUTO, "rnd_gp_gain", CTLFLAG_RW,
1045 &rack_gp_gain_req, 1200,
1046 "How much do we have to increase the GP to record the round 1200 = 120.0");
1047 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1048 SYSCTL_CHILDREN(rack_pacing),
1049 OID_AUTO, "dgp_out_of_ss_at", CTLFLAG_RW,
1050 &rack_rnd_cnt_req, 0x10005,
1051 "How many rounds less than rnd_gp_gain will drop us out of SS");
1052 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1053 SYSCTL_CHILDREN(rack_pacing),
1054 OID_AUTO, "no_timely", CTLFLAG_RW,
1055 &rack_timely_off, 0,
1056 "Do we not use timely in DGP?");
1057 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1058 SYSCTL_CHILDREN(rack_pacing),
1059 OID_AUTO, "fullbufdisc", CTLFLAG_RW,
1060 &rack_full_buffer_discount, 10,
1061 "What percentage b/w reduction over the GP estimate for a full buffer (default=0 off)?");
1062 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1063 SYSCTL_CHILDREN(rack_pacing),
1064 OID_AUTO, "fillcw", CTLFLAG_RW,
1065 &rack_fill_cw_state, 0,
1066 "Enable fillcw on new connections (default=0 off)?");
1067 SYSCTL_ADD_U16(&rack_sysctl_ctx,
1068 SYSCTL_CHILDREN(rack_pacing),
1069 OID_AUTO, "min_burst", CTLFLAG_RW,
1070 &rack_pacing_min_seg, 0,
1071 "What is the min burst size for pacing (0 disables)?");
1072 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1073 SYSCTL_CHILDREN(rack_pacing),
1074 OID_AUTO, "divisor", CTLFLAG_RW,
1075 &rack_default_pacing_divisor, 250,
1076 "What is the default divisor given to the rl code?");
1077 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1078 SYSCTL_CHILDREN(rack_pacing),
1079 OID_AUTO, "fillcw_max_mult", CTLFLAG_RW,
1080 &rack_bw_multipler, 0,
1081 "What is the limit multiplier of the current gp_est that fillcw can increase the b/w too, 200 == 200% (0 = off)?");
1082 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1083 SYSCTL_CHILDREN(rack_pacing),
1084 OID_AUTO, "max_pace_over", CTLFLAG_RW,
1085 &rack_max_per_above, 30,
1086 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)");
1087 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1088 SYSCTL_CHILDREN(rack_pacing),
1089 OID_AUTO, "allow1mss", CTLFLAG_RW,
1090 &rack_pace_one_seg, 0,
1091 "Do we allow low b/w pacing of 1MSS instead of two (1.2Meg and less)?");
1092 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1093 SYSCTL_CHILDREN(rack_pacing),
1094 OID_AUTO, "limit_wsrtt", CTLFLAG_RW,
1095 &rack_limit_time_with_srtt, 0,
1096 "Do we limit pacing time based on srtt");
1097 SYSCTL_ADD_U16(&rack_sysctl_ctx,
1098 SYSCTL_CHILDREN(rack_pacing),
1099 OID_AUTO, "gp_per_ss", CTLFLAG_RW,
1100 &rack_per_of_gp_ss, 250,
1101 "If non zero, what percentage of goodput to pace at in slow start");
1102 SYSCTL_ADD_U16(&rack_sysctl_ctx,
1103 SYSCTL_CHILDREN(rack_pacing),
1104 OID_AUTO, "gp_per_ca", CTLFLAG_RW,
1105 &rack_per_of_gp_ca, 150,
1106 "If non zero, what percentage of goodput to pace at in congestion avoidance");
1107 SYSCTL_ADD_U16(&rack_sysctl_ctx,
1108 SYSCTL_CHILDREN(rack_pacing),
1109 OID_AUTO, "gp_per_rec", CTLFLAG_RW,
1110 &rack_per_of_gp_rec, 200,
1111 "If non zero, what percentage of goodput to pace at in recovery");
1112 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1113 SYSCTL_CHILDREN(rack_pacing),
1114 OID_AUTO, "pace_max_seg", CTLFLAG_RW,
1115 &rack_hptsi_segments, 40,
1116 "What size is the max for TSO segments in pacing and burst mitigation");
1117 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1118 SYSCTL_CHILDREN(rack_pacing),
1119 OID_AUTO, "burst_reduces", CTLFLAG_RW,
1120 &rack_slot_reduction, 4,
1121 "When doing only burst mitigation what is the reduce divisor");
1122 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1123 SYSCTL_CHILDREN(rack_sysctl_root),
1124 OID_AUTO, "use_pacing", CTLFLAG_RW,
1125 &rack_pace_every_seg, 0,
1126 "If set we use pacing, if clear we use only the original burst mitigation");
1127 SYSCTL_ADD_U64(&rack_sysctl_ctx,
1128 SYSCTL_CHILDREN(rack_pacing),
1129 OID_AUTO, "rate_cap", CTLFLAG_RW,
1130 &rack_bw_rate_cap, 0,
1131 "If set we apply this value to the absolute rate cap used by pacing");
1132 SYSCTL_ADD_U64(&rack_sysctl_ctx,
1133 SYSCTL_CHILDREN(rack_pacing),
1134 OID_AUTO, "fillcw_cap", CTLFLAG_RW,
1135 &rack_fillcw_bw_cap, 3750000,
1136 "Do we have an absolute cap on the amount of b/w fillcw can specify (0 = no)?");
1137 SYSCTL_ADD_U8(&rack_sysctl_ctx,
1138 SYSCTL_CHILDREN(rack_sysctl_root),
1139 OID_AUTO, "req_measure_cnt", CTLFLAG_RW,
1140 &rack_req_measurements, 1,
1141 "If doing dynamic pacing, how many measurements must be in before we start pacing?");
1142 /* Hardware pacing */
1143 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1144 SYSCTL_CHILDREN(rack_sysctl_root),
1145 OID_AUTO,
1146 "hdwr_pacing",
1147 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1148 "Pacing related Controls");
1149 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1150 SYSCTL_CHILDREN(rack_hw_pacing),
1151 OID_AUTO, "rwnd_factor", CTLFLAG_RW,
1152 &rack_hw_rwnd_factor, 2,
1153 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?");
1154 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1155 SYSCTL_CHILDREN(rack_hw_pacing),
1156 OID_AUTO, "precheck", CTLFLAG_RW,
1157 &rack_hw_check_queue, 0,
1158 "Do we always precheck the hdwr pacing queue to avoid ENOBUF's?");
1159 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1160 SYSCTL_CHILDREN(rack_hw_pacing),
1161 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW,
1162 &rack_enobuf_hw_boost_mult, 0,
1163 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?");
1164 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1165 SYSCTL_CHILDREN(rack_hw_pacing),
1166 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW,
1167 &rack_enobuf_hw_max, 2,
1168 "What is the max boost the pacing time if we see a ENOBUFS?");
1169 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1170 SYSCTL_CHILDREN(rack_hw_pacing),
1171 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW,
1172 &rack_enobuf_hw_min, 2,
1173 "What is the min boost the pacing time if we see a ENOBUFS?");
1174 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1175 SYSCTL_CHILDREN(rack_hw_pacing),
1176 OID_AUTO, "enable", CTLFLAG_RW,
1177 &rack_enable_hw_pacing, 0,
1178 "Should RACK attempt to use hw pacing?");
1179 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1180 SYSCTL_CHILDREN(rack_hw_pacing),
1181 OID_AUTO, "rate_cap", CTLFLAG_RW,
1182 &rack_hw_rate_caps, 0,
1183 "Does the highest hardware pacing rate cap the rate we will send at??");
1184 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1185 SYSCTL_CHILDREN(rack_hw_pacing),
1186 OID_AUTO, "uncap_per", CTLFLAG_RW,
1187 &rack_hw_rate_cap_per, 0,
1188 "If you go over b/w by this amount you will be uncapped (0 = never)");
1189 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1190 SYSCTL_CHILDREN(rack_hw_pacing),
1191 OID_AUTO, "rate_min", CTLFLAG_RW,
1192 &rack_hw_rate_min, 0,
1193 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?");
1194 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1195 SYSCTL_CHILDREN(rack_hw_pacing),
1196 OID_AUTO, "rate_to_low", CTLFLAG_RW,
1197 &rack_hw_rate_to_low, 0,
1198 "If we fall below this rate, dis-engage hw pacing?");
1199 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1200 SYSCTL_CHILDREN(rack_hw_pacing),
1201 OID_AUTO, "up_only", CTLFLAG_RW,
1202 &rack_hw_up_only, 0,
1203 "Do we allow hw pacing to lower the rate selected?");
1204 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1205 SYSCTL_CHILDREN(rack_hw_pacing),
1206 OID_AUTO, "extra_mss_precise", CTLFLAG_RW,
1207 &rack_hw_pace_extra_slots, 0,
1208 "If the rates between software and hardware match precisely how many extra time_betweens do we get?");
1209 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1210 SYSCTL_CHILDREN(rack_sysctl_root),
1211 OID_AUTO,
1212 "timely",
1213 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1214 "Rack Timely RTT Controls");
1215 /* Timely based GP dynamics */
1216 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1217 SYSCTL_CHILDREN(rack_timely),
1218 OID_AUTO, "upper", CTLFLAG_RW,
1219 &rack_gp_per_bw_mul_up, 2,
1220 "Rack timely upper range for equal b/w (in percentage)");
1221 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1222 SYSCTL_CHILDREN(rack_timely),
1223 OID_AUTO, "lower", CTLFLAG_RW,
1224 &rack_gp_per_bw_mul_down, 4,
1225 "Rack timely lower range for equal b/w (in percentage)");
1226 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1227 SYSCTL_CHILDREN(rack_timely),
1228 OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
1229 &rack_gp_rtt_maxmul, 3,
1230 "Rack timely multiplier of lowest rtt for rtt_max");
1231 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1232 SYSCTL_CHILDREN(rack_timely),
1233 OID_AUTO, "rtt_min_div", CTLFLAG_RW,
1234 &rack_gp_rtt_mindiv, 4,
1235 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
1236 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1237 SYSCTL_CHILDREN(rack_timely),
1238 OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
1239 &rack_gp_rtt_minmul, 1,
1240 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
1241 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1242 SYSCTL_CHILDREN(rack_timely),
1243 OID_AUTO, "decrease", CTLFLAG_RW,
1244 &rack_gp_decrease_per, 80,
1245 "Rack timely Beta value 80 = .8 (scaled by 100)");
1246 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1247 SYSCTL_CHILDREN(rack_timely),
1248 OID_AUTO, "increase", CTLFLAG_RW,
1249 &rack_gp_increase_per, 2,
1250 "Rack timely increase perentage of our GP multiplication factor");
1251 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1252 SYSCTL_CHILDREN(rack_timely),
1253 OID_AUTO, "lowerbound", CTLFLAG_RW,
1254 &rack_per_lower_bound, 50,
1255 "Rack timely lowest percentage we allow GP multiplier to fall to");
1256 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1257 SYSCTL_CHILDREN(rack_timely),
1258 OID_AUTO, "p5_upper", CTLFLAG_RW,
1259 &rack_gain_p5_ub, 250,
1260 "Profile 5 upper bound to timely gain");
1262 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1263 SYSCTL_CHILDREN(rack_timely),
1264 OID_AUTO, "upperboundss", CTLFLAG_RW,
1265 &rack_per_upper_bound_ss, 0,
1266 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
1267 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1268 SYSCTL_CHILDREN(rack_timely),
1269 OID_AUTO, "upperboundca", CTLFLAG_RW,
1270 &rack_per_upper_bound_ca, 0,
1271 "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)");
1272 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1273 SYSCTL_CHILDREN(rack_timely),
1274 OID_AUTO, "dynamicgp", CTLFLAG_RW,
1275 &rack_do_dyn_mul, 0,
1276 "Rack timely do we enable dynmaic timely goodput by default");
1277 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1278 SYSCTL_CHILDREN(rack_timely),
1279 OID_AUTO, "no_rec_red", CTLFLAG_RW,
1280 &rack_gp_no_rec_chg, 1,
1281 "Rack timely do we prohibit the recovery multiplier from being lowered");
1282 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1283 SYSCTL_CHILDREN(rack_timely),
1284 OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
1285 &rack_timely_dec_clear, 6,
1286 "Rack timely what threshold do we count to before another boost during b/w decent");
1287 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1288 SYSCTL_CHILDREN(rack_timely),
1289 OID_AUTO, "max_push_rise", CTLFLAG_RW,
1290 &rack_timely_max_push_rise, 3,
1291 "Rack timely how many times do we push up with b/w increase");
1292 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1293 SYSCTL_CHILDREN(rack_timely),
1294 OID_AUTO, "max_push_drop", CTLFLAG_RW,
1295 &rack_timely_max_push_drop, 3,
1296 "Rack timely how many times do we push back on b/w decent");
1297 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1298 SYSCTL_CHILDREN(rack_timely),
1299 OID_AUTO, "min_segs", CTLFLAG_RW,
1300 &rack_timely_min_segs, 4,
1301 "Rack timely when setting the cwnd what is the min num segments");
1302 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1303 SYSCTL_CHILDREN(rack_timely),
1304 OID_AUTO, "noback_max", CTLFLAG_RW,
1305 &rack_use_max_for_nobackoff, 0,
1306 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min");
1307 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1308 SYSCTL_CHILDREN(rack_timely),
1309 OID_AUTO, "interim_timely_only", CTLFLAG_RW,
1310 &rack_timely_int_timely_only, 0,
1311 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
1312 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1313 SYSCTL_CHILDREN(rack_timely),
1314 OID_AUTO, "nonstop", CTLFLAG_RW,
1315 &rack_timely_no_stopping, 0,
1316 "Rack timely don't stop increase");
1317 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1318 SYSCTL_CHILDREN(rack_timely),
1319 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
1320 &rack_down_raise_thresh, 100,
1321 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
1322 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1323 SYSCTL_CHILDREN(rack_timely),
1324 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW,
1325 &rack_req_segs, 1,
1326 "Bottom dragging if not these many segments outstanding and room");
1328 /* TLP and Rack related parameters */
1329 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1330 SYSCTL_CHILDREN(rack_sysctl_root),
1331 OID_AUTO,
1332 "tlp",
1333 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1334 "TLP and Rack related Controls");
1335 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1336 SYSCTL_CHILDREN(rack_tlp),
1337 OID_AUTO, "use_rrr", CTLFLAG_RW,
1338 &use_rack_rr, 1,
1339 "Do we use Rack Rapid Recovery");
1340 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1341 SYSCTL_CHILDREN(rack_tlp),
1342 OID_AUTO, "post_rec_labc", CTLFLAG_RW,
1343 &rack_max_abc_post_recovery, 2,
1344 "Since we do early recovery, do we override the l_abc to a value, if so what?");
1345 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1346 SYSCTL_CHILDREN(rack_tlp),
1347 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW,
1348 &rack_non_rxt_use_cr, 0,
1349 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk");
1350 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1351 SYSCTL_CHILDREN(rack_tlp),
1352 OID_AUTO, "tlpmethod", CTLFLAG_RW,
1353 &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
1354 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
1355 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1356 SYSCTL_CHILDREN(rack_tlp),
1357 OID_AUTO, "limit", CTLFLAG_RW,
1358 &rack_tlp_limit, 2,
1359 "How many TLP's can be sent without sending new data");
1360 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1361 SYSCTL_CHILDREN(rack_tlp),
1362 OID_AUTO, "use_greater", CTLFLAG_RW,
1363 &rack_tlp_use_greater, 1,
1364 "Should we use the rack_rtt time if its greater than srtt");
1365 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1366 SYSCTL_CHILDREN(rack_tlp),
1367 OID_AUTO, "tlpminto", CTLFLAG_RW,
1368 &rack_tlp_min, 10000,
1369 "TLP minimum timeout per the specification (in microseconds)");
1370 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1371 SYSCTL_CHILDREN(rack_tlp),
1372 OID_AUTO, "send_oldest", CTLFLAG_RW,
1373 &rack_always_send_oldest, 0,
1374 "Should we always send the oldest TLP and RACK-TLP");
1375 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1376 SYSCTL_CHILDREN(rack_tlp),
1377 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
1378 &rack_lower_cwnd_at_tlp, 0,
1379 "When a TLP completes a retran should we enter recovery");
1380 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1381 SYSCTL_CHILDREN(rack_tlp),
1382 OID_AUTO, "reorder_thresh", CTLFLAG_RW,
1383 &rack_reorder_thresh, 2,
1384 "What factor for rack will be added when seeing reordering (shift right)");
1385 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1386 SYSCTL_CHILDREN(rack_tlp),
1387 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
1388 &rack_tlp_thresh, 1,
1389 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
1390 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1391 SYSCTL_CHILDREN(rack_tlp),
1392 OID_AUTO, "reorder_fade", CTLFLAG_RW,
1393 &rack_reorder_fade, 60000000,
1394 "Does reorder detection fade, if so how many microseconds (0 means never)");
1395 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1396 SYSCTL_CHILDREN(rack_tlp),
1397 OID_AUTO, "pktdelay", CTLFLAG_RW,
1398 &rack_pkt_delay, 1000,
1399 "Extra RACK time (in microseconds) besides reordering thresh");
1401 /* Timer related controls */
1402 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1403 SYSCTL_CHILDREN(rack_sysctl_root),
1404 OID_AUTO,
1405 "timers",
1406 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1407 "Timer related controls");
1408 SYSCTL_ADD_U8(&rack_sysctl_ctx,
1409 SYSCTL_CHILDREN(rack_timers),
1410 OID_AUTO, "reset_ssth_rec_rto", CTLFLAG_RW,
1411 &rack_ssthresh_rest_rto_rec, 0,
1412 "When doing recovery -> rto -> recovery do we reset SSthresh?");
1413 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1414 SYSCTL_CHILDREN(rack_timers),
1415 OID_AUTO, "scoreboard_thresh", CTLFLAG_RW,
1416 &rack_rxt_scoreboard_clear_thresh, 2,
1417 "How many RTO's are allowed before we clear the scoreboard");
1418 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1419 SYSCTL_CHILDREN(rack_timers),
1420 OID_AUTO, "honor_hpts_min", CTLFLAG_RW,
1421 &rack_honors_hpts_min_to, 1,
1422 "Do rack pacing timers honor hpts min timeout");
1423 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1424 SYSCTL_CHILDREN(rack_timers),
1425 OID_AUTO, "hpts_max_reduce", CTLFLAG_RW,
1426 &rack_max_reduce, 10,
1427 "Max percentage we will reduce slot by for pacing when we are behind");
1428 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1429 SYSCTL_CHILDREN(rack_timers),
1430 OID_AUTO, "persmin", CTLFLAG_RW,
1431 &rack_persist_min, 250000,
1432 "What is the minimum time in microseconds between persists");
1433 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1434 SYSCTL_CHILDREN(rack_timers),
1435 OID_AUTO, "persmax", CTLFLAG_RW,
1436 &rack_persist_max, 2000000,
1437 "What is the largest delay in microseconds between persists");
1438 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1439 SYSCTL_CHILDREN(rack_timers),
1440 OID_AUTO, "delayed_ack", CTLFLAG_RW,
1441 &rack_delayed_ack_time, 40000,
1442 "Delayed ack time (40ms in microseconds)");
1443 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1444 SYSCTL_CHILDREN(rack_timers),
1445 OID_AUTO, "minrto", CTLFLAG_RW,
1446 &rack_rto_min, 30000,
1447 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP");
1448 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1449 SYSCTL_CHILDREN(rack_timers),
1450 OID_AUTO, "maxrto", CTLFLAG_RW,
1451 &rack_rto_max, 4000000,
1452 "Maximum RTO in microseconds -- should be at least as large as min_rto");
1453 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1454 SYSCTL_CHILDREN(rack_timers),
1455 OID_AUTO, "minto", CTLFLAG_RW,
1456 &rack_min_to, 1000,
1457 "Minimum rack timeout in microseconds");
1458 /* Measure controls */
1459 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1460 SYSCTL_CHILDREN(rack_sysctl_root),
1461 OID_AUTO,
1462 "measure",
1463 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1464 "Measure related controls");
1465 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1466 SYSCTL_CHILDREN(rack_measure),
1467 OID_AUTO, "wma_divisor", CTLFLAG_RW,
1468 &rack_wma_divisor, 8,
1469 "When doing b/w calculation what is the divisor for the WMA");
1470 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1471 SYSCTL_CHILDREN(rack_measure),
1472 OID_AUTO, "end_cwnd", CTLFLAG_RW,
1473 &rack_cwnd_block_ends_measure, 0,
1474 "Does a cwnd just-return end the measurement window (app limited)");
1475 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1476 SYSCTL_CHILDREN(rack_measure),
1477 OID_AUTO, "end_rwnd", CTLFLAG_RW,
1478 &rack_rwnd_block_ends_measure, 0,
1479 "Does an rwnd just-return end the measurement window (app limited -- not persists)");
1480 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1481 SYSCTL_CHILDREN(rack_measure),
1482 OID_AUTO, "min_target", CTLFLAG_RW,
1483 &rack_def_data_window, 20,
1484 "What is the minimum target window (in mss) for a GP measurement");
1485 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1486 SYSCTL_CHILDREN(rack_measure),
1487 OID_AUTO, "goal_bdp", CTLFLAG_RW,
1488 &rack_goal_bdp, 2,
1489 "What is the goal BDP to measure");
1490 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1491 SYSCTL_CHILDREN(rack_measure),
1492 OID_AUTO, "min_srtts", CTLFLAG_RW,
1493 &rack_min_srtts, 1,
1494 "What is the minimum number of SRTTs a GP measurement must span");
1495 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1496 SYSCTL_CHILDREN(rack_measure),
1497 OID_AUTO, "min_measure_tim", CTLFLAG_RW,
1498 &rack_min_measure_usec, 0,
1499 "What is the minimum time (in microseconds) for a measurement; if 0, this is off");
1500 /* Features */
1501 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1502 SYSCTL_CHILDREN(rack_sysctl_root),
1503 OID_AUTO,
1504 "features",
1505 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1506 "Feature controls");
1507 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1508 SYSCTL_CHILDREN(rack_features),
1509 OID_AUTO, "hybrid_set_maxseg", CTLFLAG_RW,
1510 &rack_hybrid_allow_set_maxseg, 0,
1511 "Should hybrid pacing allow the setmss command");
1512 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1513 SYSCTL_CHILDREN(rack_features),
1514 OID_AUTO, "cmpack", CTLFLAG_RW,
1515 &rack_use_cmp_acks, 1,
1516 "Should RACK have LRO send compressed acks");
1517 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1518 SYSCTL_CHILDREN(rack_features),
1519 OID_AUTO, "fsb", CTLFLAG_RW,
1520 &rack_use_fsb, 1,
1521 "Should RACK use the fast send block?");
1522 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1523 SYSCTL_CHILDREN(rack_features),
1524 OID_AUTO, "rfo", CTLFLAG_RW,
1525 &rack_use_rfo, 1,
1526 "Should RACK use rack_fast_output()?");
1527 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1528 SYSCTL_CHILDREN(rack_features),
1529 OID_AUTO, "rsmrfo", CTLFLAG_RW,
1530 &rack_use_rsm_rfo, 1,
1531 "Should RACK use rack_fast_rsm_output()?");
1532 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1533 SYSCTL_CHILDREN(rack_features),
1534 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
1535 &rack_enable_mqueue_for_nonpaced, 0,
1536 "Should RACK use mbuf queuing for non-paced connections");
1537 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1538 SYSCTL_CHILDREN(rack_features),
1539 OID_AUTO, "hystartplusplus", CTLFLAG_RW,
1540 &rack_do_hystart, 0,
1541 "Should RACK enable HyStart++ on connections?");
1542 /* Misc rack controls */
1543 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1544 SYSCTL_CHILDREN(rack_sysctl_root),
1545 OID_AUTO,
1546 "misc",
1547 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1548 "Misc related controls");
1549 #ifdef TCP_ACCOUNTING
1550 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1551 SYSCTL_CHILDREN(rack_misc),
1552 OID_AUTO, "tcp_acct", CTLFLAG_RW,
1553 &rack_tcp_accounting, 0,
1554 "Should we turn on TCP accounting for all rack sessions?");
1555 #endif
1556 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1557 SYSCTL_CHILDREN(rack_misc),
1558 OID_AUTO, "dnd", CTLFLAG_RW,
1559 &rack_dnd_default, 0,
1560 "Do not disturb default for rack_rrr = 3");
1561 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1562 SYSCTL_CHILDREN(rack_misc),
1563 OID_AUTO, "sad_seg_per", CTLFLAG_RW,
1564 &sad_seg_size_per, 800,
1565 "Percentage of segment size needed in a sack (800 = 80.0%)");
1566 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1567 SYSCTL_CHILDREN(rack_misc),
1568 OID_AUTO, "rxt_controls", CTLFLAG_RW,
1569 &rack_rxt_controls, 0,
1570 "Retransmit sending size controls (valid values 0, 1, 2 default=1)?");
1571 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1572 SYSCTL_CHILDREN(rack_misc),
1573 OID_AUTO, "rack_hibeta", CTLFLAG_RW,
1574 &rack_hibeta_setting, 0,
1575 "Do we use a high beta (80 instead of 50)?");
1576 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1577 SYSCTL_CHILDREN(rack_misc),
1578 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW,
1579 &rack_apply_rtt_with_reduced_conf, 0,
1580 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?");
1581 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1582 SYSCTL_CHILDREN(rack_misc),
1583 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW,
1584 &rack_dsack_std_based, 3,
1585 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?");
1586 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1587 SYSCTL_CHILDREN(rack_misc),
1588 OID_AUTO, "prr_addback_max", CTLFLAG_RW,
1589 &rack_prr_addbackmax, 2,
1590 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?");
1591 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1592 SYSCTL_CHILDREN(rack_misc),
1593 OID_AUTO, "stats_gets_ms", CTLFLAG_RW,
1594 &rack_stats_gets_ms_rtt, 1,
1595 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?");
1596 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1597 SYSCTL_CHILDREN(rack_misc),
1598 OID_AUTO, "clientlowbuf", CTLFLAG_RW,
1599 &rack_client_low_buf, 0,
1600 "Client low buffer level (below this we are more aggressive in DGP exiting recovery; 0 = off)?");
1601 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1602 SYSCTL_CHILDREN(rack_misc),
1603 OID_AUTO, "defprofile", CTLFLAG_RW,
1604 &rack_def_profile, 0,
1605 "Should RACK use a default profile (0=no, num == profile num)?");
1606 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1607 SYSCTL_CHILDREN(rack_misc),
1608 OID_AUTO, "shared_cwnd", CTLFLAG_RW,
1609 &rack_enable_shared_cwnd, 1,
1610 "Should RACK try to use the shared cwnd on connections where allowed");
1611 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1612 SYSCTL_CHILDREN(rack_misc),
1613 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW,
1614 &rack_limits_scwnd, 1,
1615 "Should RACK place low end time limits on the shared cwnd feature");
1616 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1617 SYSCTL_CHILDREN(rack_misc),
1618 OID_AUTO, "no_prr", CTLFLAG_RW,
1619 &rack_disable_prr, 0,
1620 "Should RACK not use prr and only pace (must have pacing on)");
1621 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1622 SYSCTL_CHILDREN(rack_misc),
1623 OID_AUTO, "bb_verbose", CTLFLAG_RW,
1624 &rack_verbose_logging, 0,
1625 "Should RACK black box logging be verbose");
1626 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1627 SYSCTL_CHILDREN(rack_misc),
1628 OID_AUTO, "data_after_close", CTLFLAG_RW,
1629 &rack_ignore_data_after_close, 1,
1630 "Do we hold off sending a RST until all pending data is ack'd");
1631 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1632 SYSCTL_CHILDREN(rack_misc),
1633 OID_AUTO, "no_sack_needed", CTLFLAG_RW,
1634 &rack_sack_not_required, 1,
1635 "Do we allow rack to run on connections not supporting SACK");
1636 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1637 SYSCTL_CHILDREN(rack_misc),
1638 OID_AUTO, "prr_sendalot", CTLFLAG_RW,
1639 &rack_send_a_lot_in_prr, 1,
1640 "Send a lot in prr");
1641 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1642 SYSCTL_CHILDREN(rack_misc),
1643 OID_AUTO, "autoscale", CTLFLAG_RW,
1644 &rack_autosndbuf_inc, 20,
1645 "What percentage should rack scale up its snd buffer by?");
1648 /* Sack Attacker detection stuff */
1649 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1650 SYSCTL_CHILDREN(rack_attack),
1651 OID_AUTO, "merge_out", CTLFLAG_RW,
1652 &rack_merge_out_sacks_on_attack, 0,
1653 "Do we merge the sendmap when we decide we are being attacked?");
1655 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1656 SYSCTL_CHILDREN(rack_attack),
1657 OID_AUTO, "detect_highsackratio", CTLFLAG_RW,
1658 &rack_highest_sack_thresh_seen, 0,
1659 "Highest sack to ack ratio seen");
1660 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1661 SYSCTL_CHILDREN(rack_attack),
1662 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW,
1663 &rack_highest_move_thresh_seen, 0,
1664 "Highest move to non-move ratio seen");
1665 rack_ack_total = counter_u64_alloc(M_WAITOK);
1666 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1667 SYSCTL_CHILDREN(rack_attack),
1668 OID_AUTO, "acktotal", CTLFLAG_RD,
1669 &rack_ack_total,
1670 "Total number of Ack's");
1671 rack_express_sack = counter_u64_alloc(M_WAITOK);
1672 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1673 SYSCTL_CHILDREN(rack_attack),
1674 OID_AUTO, "exp_sacktotal", CTLFLAG_RD,
1675 &rack_express_sack,
1676 "Total number of express SACKs");
1677 rack_sack_total = counter_u64_alloc(M_WAITOK);
1678 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1679 SYSCTL_CHILDREN(rack_attack),
1680 OID_AUTO, "sacktotal", CTLFLAG_RD,
1681 &rack_sack_total,
1682 "Total number of SACKs");
1683 rack_move_none = counter_u64_alloc(M_WAITOK);
1684 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1685 SYSCTL_CHILDREN(rack_attack),
1686 OID_AUTO, "move_none", CTLFLAG_RD,
1687 &rack_move_none,
1688 "Total number of SACK index reuse of positions under threshold");
1689 rack_move_some = counter_u64_alloc(M_WAITOK);
1690 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1691 SYSCTL_CHILDREN(rack_attack),
1692 OID_AUTO, "move_some", CTLFLAG_RD,
1693 &rack_move_some,
1694 "Total number of SACK index reuse of positions over threshold");
1695 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK);
1696 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1697 SYSCTL_CHILDREN(rack_attack),
1698 OID_AUTO, "attacks", CTLFLAG_RD,
1699 &rack_sack_attacks_detected,
1700 "Total number of SACK attackers that had sack disabled");
1701 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK);
1702 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1703 SYSCTL_CHILDREN(rack_attack),
1704 OID_AUTO, "reversed", CTLFLAG_RD,
1705 &rack_sack_attacks_reversed,
1706 "Total number of SACK attackers that were later determined false positive");
1707 rack_sack_attacks_suspect = counter_u64_alloc(M_WAITOK);
1708 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1709 SYSCTL_CHILDREN(rack_attack),
1710 OID_AUTO, "suspect", CTLFLAG_RD,
1711 &rack_sack_attacks_suspect,
1712 "Total number of SACKs that triggered early detection");
1714 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK);
1715 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1716 SYSCTL_CHILDREN(rack_attack),
1717 OID_AUTO, "nextmerge", CTLFLAG_RD,
1718 &rack_sack_used_next_merge,
1719 "Total number of times we used the next merge");
1720 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK);
1721 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1722 SYSCTL_CHILDREN(rack_attack),
1723 OID_AUTO, "prevmerge", CTLFLAG_RD,
1724 &rack_sack_used_prev_merge,
1725 "Total number of times we used the prev merge");
1726 /* Counters */
1727 rack_total_bytes = counter_u64_alloc(M_WAITOK);
1728 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1729 SYSCTL_CHILDREN(rack_counters),
1730 OID_AUTO, "totalbytes", CTLFLAG_RD,
1731 &rack_total_bytes,
1732 "Total number of bytes sent");
1733 rack_fto_send = counter_u64_alloc(M_WAITOK);
1734 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1735 SYSCTL_CHILDREN(rack_counters),
1736 OID_AUTO, "fto_send", CTLFLAG_RD,
1737 &rack_fto_send, "Total number of rack_fast_output sends");
1738 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK);
1739 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1740 SYSCTL_CHILDREN(rack_counters),
1741 OID_AUTO, "fto_rsm_send", CTLFLAG_RD,
1742 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends");
1743 rack_nfto_resend = counter_u64_alloc(M_WAITOK);
1744 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1745 SYSCTL_CHILDREN(rack_counters),
1746 OID_AUTO, "nfto_resend", CTLFLAG_RD,
1747 &rack_nfto_resend, "Total number of rack_output retransmissions");
1748 rack_non_fto_send = counter_u64_alloc(M_WAITOK);
1749 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1750 SYSCTL_CHILDREN(rack_counters),
1751 OID_AUTO, "nfto_send", CTLFLAG_RD,
1752 &rack_non_fto_send, "Total number of rack_output first sends");
1753 rack_extended_rfo = counter_u64_alloc(M_WAITOK);
1754 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1755 SYSCTL_CHILDREN(rack_counters),
1756 OID_AUTO, "rfo_extended", CTLFLAG_RD,
1757 &rack_extended_rfo, "Total number of times we extended rfo");
1759 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK);
1760 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1761 SYSCTL_CHILDREN(rack_counters),
1762 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD,
1763 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing");
1764 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK);
1766 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1767 SYSCTL_CHILDREN(rack_counters),
1768 OID_AUTO, "hwpace_lost", CTLFLAG_RD,
1769 &rack_hw_pace_lost, "Total number of times we lost the use of hw pacing");
1770 rack_tlp_tot = counter_u64_alloc(M_WAITOK);
1771 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1772 SYSCTL_CHILDREN(rack_counters),
1773 OID_AUTO, "tlp_to_total", CTLFLAG_RD,
1774 &rack_tlp_tot,
1775 "Total number of tail loss probe expirations");
1776 rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
1777 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1778 SYSCTL_CHILDREN(rack_counters),
1779 OID_AUTO, "tlp_new", CTLFLAG_RD,
1780 &rack_tlp_newdata,
1781 "Total number of tail loss probe sending new data");
1782 rack_tlp_retran = counter_u64_alloc(M_WAITOK);
1783 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1784 SYSCTL_CHILDREN(rack_counters),
1785 OID_AUTO, "tlp_retran", CTLFLAG_RD,
1786 &rack_tlp_retran,
1787 "Total number of tail loss probe sending retransmitted data");
1788 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
1789 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1790 SYSCTL_CHILDREN(rack_counters),
1791 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
1792 &rack_tlp_retran_bytes,
1793 "Total bytes of tail loss probe sending retransmitted data");
1794 rack_to_tot = counter_u64_alloc(M_WAITOK);
1795 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1796 SYSCTL_CHILDREN(rack_counters),
1797 OID_AUTO, "rack_to_tot", CTLFLAG_RD,
1798 &rack_to_tot,
1799 "Total number of times the rack timeout expired");
1800 rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
1801 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1802 SYSCTL_CHILDREN(rack_counters),
1803 OID_AUTO, "saw_enobufs", CTLFLAG_RD,
1804 &rack_saw_enobuf,
1805 "Total number of times a send returned enobuf for non-hdwr paced connections");
1806 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK);
1807 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1808 SYSCTL_CHILDREN(rack_counters),
1809 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD,
1810 &rack_saw_enobuf_hw,
1811 "Total number of times a send returned enobuf for hdwr paced connections");
1812 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
1813 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1814 SYSCTL_CHILDREN(rack_counters),
1815 OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
1816 &rack_saw_enetunreach,
1817 "Total number of times a send received an enetunreachable");
1818 rack_hot_alloc = counter_u64_alloc(M_WAITOK);
1819 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1820 SYSCTL_CHILDREN(rack_counters),
1821 OID_AUTO, "alloc_hot", CTLFLAG_RD,
1822 &rack_hot_alloc,
1823 "Total allocations from the top of our list");
1824 rack_to_alloc = counter_u64_alloc(M_WAITOK);
1825 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1826 SYSCTL_CHILDREN(rack_counters),
1827 OID_AUTO, "allocs", CTLFLAG_RD,
1828 &rack_to_alloc,
1829 "Total allocations of tracking structures");
1830 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
1831 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1832 SYSCTL_CHILDREN(rack_counters),
1833 OID_AUTO, "allochard", CTLFLAG_RD,
1834 &rack_to_alloc_hard,
1835 "Total allocations done with sleeping the hard way");
1836 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
1837 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1838 SYSCTL_CHILDREN(rack_counters),
1839 OID_AUTO, "allocemerg", CTLFLAG_RD,
1840 &rack_to_alloc_emerg,
1841 "Total allocations done from emergency cache");
1842 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK);
1843 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1844 SYSCTL_CHILDREN(rack_counters),
1845 OID_AUTO, "alloc_limited", CTLFLAG_RD,
1846 &rack_to_alloc_limited,
1847 "Total allocations dropped due to limit");
1848 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK);
1849 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1850 SYSCTL_CHILDREN(rack_counters),
1851 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD,
1852 &rack_alloc_limited_conns,
1853 "Connections with allocations dropped due to limit");
1854 rack_split_limited = counter_u64_alloc(M_WAITOK);
1855 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1856 SYSCTL_CHILDREN(rack_counters),
1857 OID_AUTO, "split_limited", CTLFLAG_RD,
1858 &rack_split_limited,
1859 "Split allocations dropped due to limit");
1860 rack_rxt_clamps_cwnd = counter_u64_alloc(M_WAITOK);
1861 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1862 SYSCTL_CHILDREN(rack_counters),
1863 OID_AUTO, "rxt_clamps_cwnd", CTLFLAG_RD,
1864 &rack_rxt_clamps_cwnd,
1865 "Number of times that excessive rxt clamped the cwnd down");
1866 rack_rxt_clamps_cwnd_uniq = counter_u64_alloc(M_WAITOK);
1867 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1868 SYSCTL_CHILDREN(rack_counters),
1869 OID_AUTO, "rxt_clamps_cwnd_uniq", CTLFLAG_RD,
1870 &rack_rxt_clamps_cwnd_uniq,
1871 "Number of connections that have had excessive rxt clamp the cwnd down");
1872 rack_persists_sends = counter_u64_alloc(M_WAITOK);
1873 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1874 SYSCTL_CHILDREN(rack_counters),
1875 OID_AUTO, "persist_sends", CTLFLAG_RD,
1876 &rack_persists_sends,
1877 "Number of times we sent a persist probe");
1878 rack_persists_acks = counter_u64_alloc(M_WAITOK);
1879 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1880 SYSCTL_CHILDREN(rack_counters),
1881 OID_AUTO, "persist_acks", CTLFLAG_RD,
1882 &rack_persists_acks,
1883 "Number of times a persist probe was acked");
1884 rack_persists_loss = counter_u64_alloc(M_WAITOK);
1885 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1886 SYSCTL_CHILDREN(rack_counters),
1887 OID_AUTO, "persist_loss", CTLFLAG_RD,
1888 &rack_persists_loss,
1889 "Number of times we detected a lost persist probe (no ack)");
1890 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK);
1891 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1892 SYSCTL_CHILDREN(rack_counters),
1893 OID_AUTO, "persist_loss_ends", CTLFLAG_RD,
1894 &rack_persists_lost_ends,
1895 "Number of lost persist probes (no ack) where the run ended with a PERSIST abort");
1896 #ifdef INVARIANTS
1897 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK);
1898 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1899 SYSCTL_CHILDREN(rack_counters),
1900 OID_AUTO, "map_adjust_req", CTLFLAG_RD,
1901 &rack_adjust_map_bw,
1902 "Number of times we hit the case where the sb went up and down on a sendmap entry");
1903 #endif
1904 rack_multi_single_eq = counter_u64_alloc(M_WAITOK);
1905 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1906 SYSCTL_CHILDREN(rack_counters),
1907 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD,
1908 &rack_multi_single_eq,
1909 "Total number of individual acks represented by compressed acks");
1910 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK);
1911 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1912 SYSCTL_CHILDREN(rack_counters),
1913 OID_AUTO, "cmp_ack_not", CTLFLAG_RD,
1914 &rack_proc_non_comp_ack,
1915 "Number of non-compressed acks that we processed");
1918 rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
1919 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1920 SYSCTL_CHILDREN(rack_counters),
1921 OID_AUTO, "sack_long", CTLFLAG_RD,
1922 &rack_sack_proc_all,
1923 "Total times we had to walk whole list for sack processing");
1924 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
1925 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1926 SYSCTL_CHILDREN(rack_counters),
1927 OID_AUTO, "sack_restart", CTLFLAG_RD,
1928 &rack_sack_proc_restart,
1929 "Total times we had to walk whole list due to a restart");
1930 rack_sack_proc_short = counter_u64_alloc(M_WAITOK);
1931 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1932 SYSCTL_CHILDREN(rack_counters),
1933 OID_AUTO, "sack_short", CTLFLAG_RD,
1934 &rack_sack_proc_short,
1935 "Total times we took shortcut for sack processing");
1936 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK);
1937 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1938 SYSCTL_CHILDREN(rack_attack),
1939 OID_AUTO, "skipacked", CTLFLAG_RD,
1940 &rack_sack_skipped_acked,
1941 "Total number of times we skipped previously SACKed data");
1942 rack_sack_splits = counter_u64_alloc(M_WAITOK);
1943 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1944 SYSCTL_CHILDREN(rack_attack),
1945 OID_AUTO, "ofsplit", CTLFLAG_RD,
1946 &rack_sack_splits,
1947 "Total number of times we did the old-fashioned tree split");
1948 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
1949 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1950 SYSCTL_CHILDREN(rack_counters),
1951 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD,
1952 &rack_input_idle_reduces,
1953 "Total number of idle reductions on input");
1954 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK);
1955 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1956 SYSCTL_CHILDREN(rack_counters),
1957 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD,
1958 &rack_collapsed_win_seen,
1959 "Total number of collapsed window events seen (where our window shrinks)");
1961 rack_collapsed_win = counter_u64_alloc(M_WAITOK);
1962 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1963 SYSCTL_CHILDREN(rack_counters),
1964 OID_AUTO, "collapsed_win", CTLFLAG_RD,
1965 &rack_collapsed_win,
1966 "Total number of collapsed window events where we mark packets");
1967 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK);
1968 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1969 SYSCTL_CHILDREN(rack_counters),
1970 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD,
1971 &rack_collapsed_win_rxt,
1972 "Total number of packets retransmitted due to a collapsed window");
1973 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK);
1974 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1975 SYSCTL_CHILDREN(rack_counters),
1976 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD,
1977 &rack_collapsed_win_rxt_bytes,
1978 "Total number of bytes retransmitted due to a collapsed window");
1979 rack_try_scwnd = counter_u64_alloc(M_WAITOK);
1980 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1981 SYSCTL_CHILDREN(rack_counters),
1982 OID_AUTO, "tried_scwnd", CTLFLAG_RD,
1983 &rack_try_scwnd,
1984 "Total number of scwnd attempts");
1985 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
1986 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1987 OID_AUTO, "outsize", CTLFLAG_RD,
1988 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes");
1989 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK);
1990 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1991 OID_AUTO, "opts", CTLFLAG_RD,
1992 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats");
1993 SYSCTL_ADD_PROC(&rack_sysctl_ctx,
1994 SYSCTL_CHILDREN(rack_sysctl_root),
1995 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1996 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters");
1999 static uint32_t
2000 rc_init_window(struct tcp_rack *rack)
2002 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)));
2006 static uint64_t
2007 rack_get_fixed_pacing_bw(struct tcp_rack *rack)
2009 if (IN_FASTRECOVERY(rack->rc_tp->t_flags))
2010 return (rack->r_ctl.rc_fixed_pacing_rate_rec);
2011 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
2012 return (rack->r_ctl.rc_fixed_pacing_rate_ss);
2013 else
2014 return (rack->r_ctl.rc_fixed_pacing_rate_ca);
2017 static void
2018 rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t tim,
2019 uint64_t data, uint8_t mod, uint16_t aux,
2020 struct tcp_sendfile_track *cur, int line)
2022 #ifdef TCP_REQUEST_TRK
2023 int do_log = 0;
2026 * The rate-cap logs are noisy and should only come out when normal BB logging
2027 * is enabled; the other logs (not RATE_CAP and not CAP_CALC) only come out
2028 * once per chunk and make up the BBpoint that can be turned on by the client.
2030 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) {
2032 * The very noisy two need to only come out when
2033 * we have verbose logging on.
2035 if (rack_verbose_logging != 0)
2036 do_log = tcp_bblogging_on(rack->rc_tp);
2037 else
2038 do_log = 0;
2039 } else if (mod != HYBRID_LOG_BW_MEASURE) {
2041 * All other less noisy logs here except the measure which
2042 * also needs to come out on the point and the log.
2044 do_log = tcp_bblogging_on(rack->rc_tp);
2045 } else {
2046 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING);
2049 if (do_log) {
2050 union tcp_log_stackspecific log;
2051 struct timeval tv;
2052 uint64_t lt_bw;
2054 /* Convert our ms to a microsecond */
2055 memset(&log, 0, sizeof(log));
2057 log.u_bbr.cwnd_gain = line;
2058 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2059 log.u_bbr.rttProp = tim;
2060 log.u_bbr.bw_inuse = cbw;
2061 log.u_bbr.delRate = rack_get_gp_est(rack);
2062 lt_bw = rack_get_lt_bw(rack);
2063 log.u_bbr.flex1 = seq;
2064 log.u_bbr.pacing_gain = aux;
2065 /* lt_bw = < flex3 | flex2 > */
2066 log.u_bbr.flex2 = (uint32_t)(lt_bw & 0x00000000ffffffff);
2067 log.u_bbr.flex3 = (uint32_t)((lt_bw >> 32) & 0x00000000ffffffff);
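/*
 * A log reader can rebuild the 64-bit value as
 * lt_bw = ((uint64_t)flex3 << 32) | flex2.
 */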
2068 /* Record the last obtained us rtt in inflight */
2069 if (cur == NULL) {
2070 /* Make sure we are looking at the right log if an override comes in */
2071 cur = rack->r_ctl.rc_last_sft;
2073 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY)
2074 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt;
2075 else {
2076 /* Use the last known rtt i.e. the rack-rtt */
2077 log.u_bbr.inflight = rack->rc_rack_rtt;
2079 if (cur != NULL) {
2080 uint64_t off;
2082 log.u_bbr.cur_del_rate = cur->deadline;
2083 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) {
2084 /* start = < lost | pkt_epoch > */
2085 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff);
2086 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff);
2087 log.u_bbr.flex6 = cur->start_seq;
2088 log.u_bbr.pkts_out = cur->end_seq;
2089 } else {
2090 /* start = < lost | pkt_epoch > */
2091 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff);
2092 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff);
2093 /* end = < pkts_out | flex6 > */
2094 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff);
2095 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff);
2097 /* first_send = <lt_epoch | epoch> */
2098 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff);
2099 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff);
2100 /* localtime = <delivered | applimited>*/
2101 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff);
2102 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff);
2103 #ifdef TCP_REQUEST_TRK
2104 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]);
2105 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track));
2106 #endif
2107 log.u_bbr.inhpts = 1;
2108 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs);
2109 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs);
2110 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags;
2111 } else {
2112 log.u_bbr.flex7 = 0xffff;
2113 log.u_bbr.cur_del_rate = 0xffffffffffffffff;
2116 * Compose bbr_state to be a bit wise 0000ADHF
2117 * where A is the always_pace flag
2118 * where D is the dgp_on flag
2119 * where H is the hybrid_mode on flag
2120 * where F is the use_fixed_rate flag.
2122 log.u_bbr.bbr_state = rack->rc_always_pace;
2123 log.u_bbr.bbr_state <<= 1;
2124 log.u_bbr.bbr_state |= rack->dgp_on;
2125 log.u_bbr.bbr_state <<= 1;
2126 log.u_bbr.bbr_state |= rack->rc_hybrid_mode;
2127 log.u_bbr.bbr_state <<= 1;
2128 log.u_bbr.bbr_state |= rack->use_fixed_rate;
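/*
 * e.g. a bbr_state of 0x5 (0101) decodes as dgp_on and
 * use_fixed_rate set, with always_pace and hybrid_mode clear.
 */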
2129 log.u_bbr.flex8 = mod;
2130 tcp_log_event(rack->rc_tp, NULL,
2131 &rack->rc_inp->inp_socket->so_rcv,
2132 &rack->rc_inp->inp_socket->so_snd,
2133 TCP_HYBRID_PACING_LOG, 0,
2134 0, &log, false, NULL, __func__, __LINE__, &tv);
2137 #endif
2140 #ifdef TCP_REQUEST_TRK
2141 static void
2142 rack_log_hybrid_sends(struct tcp_rack *rack, struct tcp_sendfile_track *cur, int line)
2144 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) {
2145 union tcp_log_stackspecific log;
2146 struct timeval tv;
2147 uint64_t off;
2149 /* Convert our ms to a microsecond */
2150 memset(&log, 0, sizeof(log));
2152 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2153 log.u_bbr.delRate = cur->sent_at_fs;
2155 if ((cur->flags & TCP_TRK_TRACK_FLG_LSND) == 0) {
2157 * We did not get a new Rules Applied to set so
2158 * no overlapping send occurred, this means the
2159 * current byte counts are correct.
2161 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes;
2162 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes;
2163 } else {
2165 * Overlapping send case, we switched to a new
2166 * send and did a rules applied.
2168 log.u_bbr.cur_del_rate = cur->sent_at_ls;
2169 log.u_bbr.rttProp = cur->rxt_at_ls;
2171 log.u_bbr.bw_inuse = cur->rxt_at_fs;
2172 log.u_bbr.cwnd_gain = line;
2173 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]);
2174 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track));
2175 /* start = < flex1 | flex2 > */
2176 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff);
2177 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff);
2178 /* end = < flex3 | flex4 > */
2179 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff);
2180 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff);
2182 /* localtime = <delivered | applimited>*/
2183 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff);
2184 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff);
2185 /* client timestamp = <lt_epoch | epoch>*/
2186 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff);
2187 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff);
2188 /* now set all the flags in */
2189 log.u_bbr.pkts_out = cur->hybrid_flags;
2190 log.u_bbr.lost = cur->playout_ms;
2191 log.u_bbr.flex6 = cur->flags;
2193 * Last send time = <flex5 | pkt_epoch> note we do not distinguish cases
2194 * where a false retransmit occurred so first_send <-> lastsend may
2195 * include a longer time than it actually took if we have a false rxt.
2197 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff);
2198 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff);
2200 * Compose bbr_state to be a bit wise 0000ADHF
2201 * where A is the always_pace flag
2202 * where D is the dgp_on flag
2203 * where H is the hybrid_mode on flag
2204 * where F is the use_fixed_rate flag.
2206 log.u_bbr.bbr_state = rack->rc_always_pace;
2207 log.u_bbr.bbr_state <<= 1;
2208 log.u_bbr.bbr_state |= rack->dgp_on;
2209 log.u_bbr.bbr_state <<= 1;
2210 log.u_bbr.bbr_state |= rack->rc_hybrid_mode;
2211 log.u_bbr.bbr_state <<= 1;
2212 log.u_bbr.bbr_state |= rack->use_fixed_rate;
2214 log.u_bbr.flex8 = HYBRID_LOG_SENT_LOST;
2215 tcp_log_event(rack->rc_tp, NULL,
2216 &rack->rc_inp->inp_socket->so_rcv,
2217 &rack->rc_inp->inp_socket->so_snd,
2218 TCP_HYBRID_PACING_LOG, 0,
2219 0, &log, false, NULL, __func__, __LINE__, &tv);
2222 #endif
2224 static inline uint64_t
2225 rack_compensate_for_linerate(struct tcp_rack *rack, uint64_t bw)
2227 uint64_t ret_bw, ether;
2228 uint64_t u_segsiz;
2230 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr);
2231 if (rack->r_is_v6){
2232 #ifdef INET6
2233 ether += sizeof(struct ip6_hdr);
2234 #endif
2235 ether += 14; /* eheader size 6+6+2 */
2236 } else {
2237 #ifdef INET
2238 ether += sizeof(struct ip);
2239 #endif
2240 ether += 14; /* eheader size 6+6+2 */
2242 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs);
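/*
 * Scale the b/w up by the ratio of the on-the-wire frame size
 * (payload plus TCP/IP/Ethernet headers) to the pacing segment
 * size, e.g. for IPv4 with a 1448 byte t_maxseg the frame is
 * 1448 + 20 (tcp) + 20 (ip) + 14 (ether) = 1502 bytes.
 */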
2243 ret_bw = bw;
2244 ret_bw *= ether;
2245 ret_bw /= u_segsiz;
2246 return (ret_bw);
2249 static void
2250 rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped)
2252 #ifdef TCP_REQUEST_TRK
2253 struct timeval tv;
2254 uint64_t timenow, timeleft, lenleft, lengone, calcbw;
2255 #endif
2257 if (rack->r_ctl.bw_rate_cap == 0)
2258 return;
2259 #ifdef TCP_REQUEST_TRK
2260 if (rack->rc_catch_up && rack->rc_hybrid_mode &&
2261 (rack->r_ctl.rc_last_sft != NULL)) {
2263 * We have a dynamic cap. The original target
2264 * is in bw_rate_cap, but we need to look at
2265 * how long it is until we hit the deadline.
2267 struct tcp_sendfile_track *ent;
2269 ent = rack->r_ctl.rc_last_sft;
2270 microuptime(&tv);
2271 timenow = tcp_tv_to_lusectick(&tv);
2272 if (timenow >= ent->deadline) {
2273 /* No time left we do DGP only */
2274 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2275 0, 0, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__);
2276 rack->r_ctl.bw_rate_cap = 0;
2277 return;
2279 /* We have the time */
2280 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow;
2281 if (timeleft < HPTS_MSEC_IN_SEC) {
2282 /* If there is less than a ms left just use DGPs rate */
2283 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2284 0, timeleft, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__);
2285 rack->r_ctl.bw_rate_cap = 0;
2286 return;
2289 * Now lets find the amount of data left to send.
2291 * Now ideally we want to use the end_seq to figure out how much more
2292 * but that is only possible if we have TCP_TRK_TRACK_FLG_COMP on the entry.
2294 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) {
2295 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una))
2296 lenleft = ent->end_seq - rack->rc_tp->snd_una;
2297 else {
2298 /* TSNH, we should catch it at the send */
2299 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2300 0, timeleft, 0, HYBRID_LOG_CAPERROR, 0, ent, __LINE__);
2301 rack->r_ctl.bw_rate_cap = 0;
2302 return;
2304 } else {
2306 * The hard way, figure out how much is gone and then
2307 * take that away from the total the client asked for
2308 * (that's off by TLS overhead if this is TLS).
2310 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq))
2311 lengone = rack->rc_tp->snd_una - ent->start_seq;
2312 else
2313 lengone = 0;
2314 if (lengone < (ent->end - ent->start))
2315 lenleft = (ent->end - ent->start) - lengone;
2316 else {
2317 /* TSNH, we should catch it at the send */
2318 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2319 0, timeleft, lengone, HYBRID_LOG_CAPERROR, 0, ent, __LINE__);
2320 rack->r_ctl.bw_rate_cap = 0;
2321 return;
2324 if (lenleft == 0) {
2325 /* We have it all sent */
2326 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2327 0, timeleft, lenleft, HYBRID_LOG_ALLSENT, 0, ent, __LINE__);
2328 if (rack->r_ctl.bw_rate_cap)
2329 goto normal_ratecap;
2330 else
2331 return;
2333 calcbw = lenleft * HPTS_USEC_IN_SEC;
2334 calcbw /= timeleft;
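/*
 * calcbw is now the rate, in bytes per second, needed to move the
 * remaining lenleft bytes before the deadline, e.g. 1,000,000 bytes
 * left with 500,000 usec remaining requires 2,000,000 bytes/sec.
 */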
2335 /* Now we must compensate for IP/TCP overhead */
2336 calcbw = rack_compensate_for_linerate(rack, calcbw);
2337 /* Update the bit rate cap */
2338 rack->r_ctl.bw_rate_cap = calcbw;
2339 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) &&
2340 (rack_hybrid_allow_set_maxseg == 1) &&
2341 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) {
2342 /* Lets set in a smaller mss possibly here to match our rate-cap */
2343 uint32_t orig_max;
2345 orig_max = rack->r_ctl.rc_pace_max_segs;
2346 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS;
2347 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp));
2348 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5);
2350 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2351 calcbw, timeleft, lenleft, HYBRID_LOG_CAP_CALC, 0, ent, __LINE__);
2352 if ((calcbw > 0) && (*bw > calcbw)) {
2353 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2354 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__);
2355 *capped = 1;
2356 *bw = calcbw;
2358 return;
2360 normal_ratecap:
2361 #endif
2362 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) {
2363 #ifdef TCP_REQUEST_TRK
2364 if (rack->rc_hybrid_mode &&
2365 rack->rc_catch_up &&
2366 (rack->r_ctl.rc_last_sft != NULL) &&
2367 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) &&
2368 (rack_hybrid_allow_set_maxseg == 1) &&
2369 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) {
2370 /* Lets set in a smaller mss possibly here to match our rate-cap */
2371 uint32_t orig_max;
2373 orig_max = rack->r_ctl.rc_pace_max_segs;
2374 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS;
2375 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg(rack->rc_tp));
2376 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5);
2378 #endif
2379 *capped = 1;
2380 *bw = rack->r_ctl.bw_rate_cap;
2381 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
2382 *bw, 0, 0,
2383 HYBRID_LOG_RATE_CAP, 1, NULL, __LINE__);
2387 static uint64_t
2388 rack_get_gp_est(struct tcp_rack *rack)
2390 uint64_t bw, lt_bw, ret_bw;
2392 if (rack->rc_gp_filled == 0) {
2394 * We have yet no b/w measurement,
2395 * if we have a user set initial bw
2396 * return it. If we don't have that and
2397 * we have an srtt, use the tcp IW (10) to
2398 * calculate a fictional b/w over the SRTT
2399 * which is more or less a guess. Note
2400 * we don't use our IW from rack on purpose
2401 * so if we have like IW=30, we are not
2402 * calculating a "huge" b/w.
2404 uint64_t srtt;
2406 if (rack->dis_lt_bw == 1)
2407 lt_bw = 0;
2408 else
2409 lt_bw = rack_get_lt_bw(rack);
2410 if (lt_bw) {
2412 * No goodput b/w, but a long-term b/w does exist,
2413 * so let's use that.
2415 ret_bw = lt_bw;
2416 goto compensate;
2418 if (rack->r_ctl.init_rate)
2419 return (rack->r_ctl.init_rate);
2421 /* Ok lets come up with the IW guess, if we have a srtt */
2422 if (rack->rc_tp->t_srtt == 0) {
2424 * Go with old pacing method
2425 * i.e. burst mitigation only.
2427 return (0);
2429 /* Ok lets get the initial TCP win (not racks) */
2430 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp));
2431 srtt = (uint64_t)rack->rc_tp->t_srtt;
2432 bw *= (uint64_t)USECS_IN_SECOND;
2433 bw /= srtt;
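/*
 * As an illustration, a 14600 byte initial window over a
 * 100000 usec (100 ms) srtt gives a guess of 146000 bytes/sec.
 */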
2434 ret_bw = bw;
2435 goto compensate;
2438 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
2439 /* Averaging is done, we can return the value */
2440 bw = rack->r_ctl.gp_bw;
2441 } else {
2442 /* Still doing initial average must calculate */
2443 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1);
2445 if (rack->dis_lt_bw) {
2446 /* We are not using lt-bw */
2447 ret_bw = bw;
2448 goto compensate;
2450 lt_bw = rack_get_lt_bw(rack);
2451 if (lt_bw == 0) {
2452 /* If we don't have one then equate it to the gp_bw */
2453 lt_bw = rack->r_ctl.gp_bw;
2455 if (rack->use_lesser_lt_bw) {
2456 if (lt_bw < bw)
2457 ret_bw = lt_bw;
2458 else
2459 ret_bw = bw;
2460 } else {
2461 if (lt_bw > bw)
2462 ret_bw = lt_bw;
2463 else
2464 ret_bw = bw;
2467 * Now lets compensate based on the TCP/IP overhead. Our
2468 * Goodput estimate does not include this so we must pace out
2469 * a bit faster since our pacing calculations do. The pacing
2470 * calculations use the base ETHERNET_SEGMENT_SIZE and the segsiz
2471 * we are using to do this, so we do that here in the opposite
2472 * direction as well. This means that if we are tunneled and the
2473 * segsiz is say 1200 bytes we will get quite a boost, but its
2474 * compensated for in the pacing time the opposite way.
2476 compensate:
2477 ret_bw = rack_compensate_for_linerate(rack, ret_bw);
2478 return(ret_bw);
2482 static uint64_t
2483 rack_get_bw(struct tcp_rack *rack)
2485 uint64_t bw;
2487 if (rack->use_fixed_rate) {
2488 /* Return the fixed pacing rate */
2489 return (rack_get_fixed_pacing_bw(rack));
2491 bw = rack_get_gp_est(rack);
2492 return (bw);
2495 static uint16_t
2496 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm)
2498 if (rack->use_fixed_rate) {
2499 return (100);
2500 } else if (rack->in_probe_rtt && (rsm == NULL))
2501 return (rack->r_ctl.rack_per_of_gp_probertt);
2502 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
2503 rack->r_ctl.rack_per_of_gp_rec)) {
2504 if (rsm) {
2505 /* a retransmission always use the recovery rate */
2506 return (rack->r_ctl.rack_per_of_gp_rec);
2507 } else if (rack->rack_rec_nonrxt_use_cr) {
2508 /* Directed to use the configured rate */
2509 goto configured_rate;
2510 } else if (rack->rack_no_prr &&
2511 (rack->r_ctl.rack_per_of_gp_rec > 100)) {
2512 /* No PRR, lets just use the b/w estimate only */
2513 return (100);
2514 } else {
2516 * Here we may have a non-retransmit but we
2517 * have no overrides, so just use the recovery
2518 * rate (prr is in effect).
2520 return (rack->r_ctl.rack_per_of_gp_rec);
2523 configured_rate:
2524 /* For the configured rate we look at our cwnd vs the ssthresh */
2525 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
2526 return (rack->r_ctl.rack_per_of_gp_ss);
2527 else
2528 return (rack->r_ctl.rack_per_of_gp_ca);
2531 static void
2532 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6)
2535 * Types of logs (mod value)
2536 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit.
2537 * 2 = a dsack round begins, persist is reset to 16.
2538 * 3 = a dsack round ends
2539 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh
2540 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack
2541 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh.
2543 if (tcp_bblogging_on(rack->rc_tp)) {
2544 union tcp_log_stackspecific log;
2545 struct timeval tv;
2547 memset(&log, 0, sizeof(log));
2548 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based;
2549 log.u_bbr.flex1 <<= 1;
2550 log.u_bbr.flex1 |= rack->rc_rack_use_dsack;
2551 log.u_bbr.flex1 <<= 1;
2552 log.u_bbr.flex1 |= rack->rc_dsack_round_seen;
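/* flex1 is a 3-bit field: <rc_rack_tmr_std_based | rc_rack_use_dsack | rc_dsack_round_seen> */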
2553 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end;
2554 log.u_bbr.flex3 = rack->r_ctl.num_dsack;
2555 log.u_bbr.flex4 = flex4;
2556 log.u_bbr.flex5 = flex5;
2557 log.u_bbr.flex6 = flex6;
2558 log.u_bbr.flex7 = rack->r_ctl.dsack_persist;
2559 log.u_bbr.flex8 = mod;
2560 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2561 log.u_bbr.epoch = rack->r_ctl.current_round;
2562 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost;
2563 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2564 &rack->rc_inp->inp_socket->so_rcv,
2565 &rack->rc_inp->inp_socket->so_snd,
2566 RACK_DSACK_HANDLING, 0,
2567 0, &log, false, &tv);
2571 static void
2572 rack_log_hdwr_pacing(struct tcp_rack *rack,
2573 uint64_t rate, uint64_t hw_rate, int line,
2574 int error, uint16_t mod)
2576 if (tcp_bblogging_on(rack->rc_tp)) {
2577 union tcp_log_stackspecific log;
2578 struct timeval tv;
2579 const struct ifnet *ifp;
2580 uint64_t ifp64;
2582 memset(&log, 0, sizeof(log));
2583 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff);
2584 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff);
2585 if (rack->r_ctl.crte) {
2586 ifp = rack->r_ctl.crte->ptbl->rs_ifp;
2587 } else if (rack->rc_inp->inp_route.ro_nh &&
2588 rack->rc_inp->inp_route.ro_nh->nh_ifp) {
2589 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp;
2590 } else
2591 ifp = NULL;
2592 if (ifp) {
2593 ifp64 = (uintptr_t)ifp;
2594 log.u_bbr.flex3 = ((ifp64 >> 32) & 0x00000000ffffffff);
2595 log.u_bbr.flex4 = (ifp64 & 0x00000000ffffffff);
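/* The 64-bit ifp pointer can be recovered as ((uint64_t)flex3 << 32) | flex4. */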
2597 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2598 log.u_bbr.bw_inuse = rate;
2599 log.u_bbr.flex5 = line;
2600 log.u_bbr.flex6 = error;
2601 log.u_bbr.flex7 = mod;
2602 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs;
2603 log.u_bbr.flex8 = rack->use_fixed_rate;
2604 log.u_bbr.flex8 <<= 1;
2605 log.u_bbr.flex8 |= rack->rack_hdrw_pacing;
2606 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
2607 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate;
2608 if (rack->r_ctl.crte)
2609 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate;
2610 else
2611 log.u_bbr.cur_del_rate = 0;
2612 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req;
2613 log.u_bbr.epoch = rack->r_ctl.current_round;
2614 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost;
2615 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2616 &rack->rc_inp->inp_socket->so_rcv,
2617 &rack->rc_inp->inp_socket->so_snd,
2618 BBR_LOG_HDWR_PACE, 0,
2619 0, &log, false, &tv);
2623 static uint64_t
2624 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped)
2627 * We allow rack_per_of_gp_xx to dictate our bw rate we want.
2629 uint64_t bw_est, high_rate;
2630 uint64_t gain;
2632 gain = (uint64_t)rack_get_output_gain(rack, rsm);
2633 bw_est = bw * gain;
2634 bw_est /= (uint64_t)100;
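/*
 * e.g. a gain of 275 (percent) applied to a 1,000,000 byte/sec
 * estimate yields a 2,750,000 byte/sec pacing target.
 */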
2635 /* Never fall below the minimum (def 64kbps) */
2636 if (bw_est < RACK_MIN_BW)
2637 bw_est = RACK_MIN_BW;
2638 if (rack->r_rack_hw_rate_caps) {
2639 /* Rate caps are in place */
2640 if (rack->r_ctl.crte != NULL) {
2641 /* We have a hdwr rate already */
2642 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
2643 if (bw_est >= high_rate) {
2644 /* We are capping bw at the highest rate table entry */
2645 if (rack_hw_rate_cap_per &&
2646 (((high_rate * (100 + rack_hw_rate_cap_per)) / 100) < bw_est)) {
2647 rack->r_rack_hw_rate_caps = 0;
2648 goto done;
2650 rack_log_hdwr_pacing(rack,
2651 bw_est, high_rate, __LINE__,
2652 0, 3);
2653 bw_est = high_rate;
2654 if (capped)
2655 *capped = 1;
2657 } else if ((rack->rack_hdrw_pacing == 0) &&
2658 (rack->rack_hdw_pace_ena) &&
2659 (rack->rack_attempt_hdwr_pace == 0) &&
2660 (rack->rc_inp->inp_route.ro_nh != NULL) &&
2661 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
2663 * Special case, we have not yet attempted hardware
2664 * pacing, and yet we may, when we do, find out if we are
2665 * above the highest rate. We need to know the maxbw for the interface
2666 * in question (if it supports ratelimiting). We get back
2667 * a 0, if the interface is not found in the RL lists.
2669 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
2670 if (high_rate) {
2671 /* Yep, we have a rate; is it above this rate? */
2672 if (bw_est > high_rate) {
2673 bw_est = high_rate;
2674 if (capped)
2675 *capped = 1;
2680 done:
2681 return (bw_est);
2684 static void
2685 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod)
2687 if (tcp_bblogging_on(rack->rc_tp)) {
2688 union tcp_log_stackspecific log;
2689 struct timeval tv;
2691 if ((mod != 1) && (rack_verbose_logging == 0)) {
2693 * We get 3 values currently for mod
2694 * 1 - We are retransmitting and this tells the reason.
2695 * 2 - We are clearing a dup-ack count.
2696 * 3 - We are incrementing a dup-ack count.
2698 * The clear/increment are only logged
2699 * if you have BBverbose on.
2701 return;
2703 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2704 log.u_bbr.flex1 = tsused;
2705 log.u_bbr.flex2 = thresh;
2706 log.u_bbr.flex3 = rsm->r_flags;
2707 log.u_bbr.flex4 = rsm->r_dupack;
2708 log.u_bbr.flex5 = rsm->r_start;
2709 log.u_bbr.flex6 = rsm->r_end;
2710 log.u_bbr.flex8 = mod;
2711 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
2712 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2713 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2714 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2715 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2716 log.u_bbr.pacing_gain = rack->r_must_retran;
2717 log.u_bbr.epoch = rack->r_ctl.current_round;
2718 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost;
2719 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2720 &rack->rc_inp->inp_socket->so_rcv,
2721 &rack->rc_inp->inp_socket->so_snd,
2722 BBR_LOG_SETTINGS_CHG, 0,
2723 0, &log, false, &tv);
2727 static void
2728 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
2730 if (tcp_bblogging_on(rack->rc_tp)) {
2731 union tcp_log_stackspecific log;
2732 struct timeval tv;
2734 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2735 log.u_bbr.flex1 = rack->rc_tp->t_srtt;
2736 log.u_bbr.flex2 = to;
2737 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
2738 log.u_bbr.flex4 = slot;
2739 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot;
2740 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2741 log.u_bbr.flex7 = rack->rc_in_persist;
2742 log.u_bbr.flex8 = which;
2743 if (rack->rack_no_prr)
2744 log.u_bbr.pkts_out = 0;
2745 else
2746 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
2747 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
2748 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2749 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2750 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2751 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2752 log.u_bbr.pacing_gain = rack->r_must_retran;
2753 log.u_bbr.cwnd_gain = rack->rack_deferred_inited;
2754 log.u_bbr.pkt_epoch = rack->rc_has_collapsed;
2755 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift;
2756 log.u_bbr.lost = rack_rto_min;
2757 log.u_bbr.epoch = rack->r_ctl.roundends;
2758 log.u_bbr.bw_inuse = rack->r_ctl.current_round;
2759 log.u_bbr.bw_inuse <<= 32;
2760 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost;
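/* Upper 32 bits of bw_inuse carry current_round, lower 32 bits rc_considered_lost. */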
2761 log.u_bbr.applimited = rack->rc_tp->t_flags2;
2762 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2763 &rack->rc_inp->inp_socket->so_rcv,
2764 &rack->rc_inp->inp_socket->so_snd,
2765 BBR_LOG_TIMERSTAR, 0,
2766 0, &log, false, &tv);
2770 static void
2771 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm)
2773 if (tcp_bblogging_on(rack->rc_tp)) {
2774 union tcp_log_stackspecific log;
2775 struct timeval tv;
2777 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2778 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
2779 log.u_bbr.flex8 = to_num;
2780 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt;
2781 log.u_bbr.flex2 = rack->rc_rack_rtt;
2782 if (rsm == NULL)
2783 log.u_bbr.flex3 = 0;
2784 else
2785 log.u_bbr.flex3 = rsm->r_end - rsm->r_start;
2786 if (rack->rack_no_prr)
2787 log.u_bbr.flex5 = 0;
2788 else
2789 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2790 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2791 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2792 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2793 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2794 log.u_bbr.pacing_gain = rack->r_must_retran;
2795 log.u_bbr.bw_inuse = rack->r_ctl.current_round;
2796 log.u_bbr.bw_inuse <<= 32;
2797 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost;
2798 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2799 &rack->rc_inp->inp_socket->so_rcv,
2800 &rack->rc_inp->inp_socket->so_snd,
2801 BBR_LOG_RTO, 0,
2802 0, &log, false, &tv);
2806 static void
2807 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack,
2808 struct rack_sendmap *prev,
2809 struct rack_sendmap *rsm,
2810 struct rack_sendmap *next,
2811 int flag, uint32_t th_ack, int line)
2813 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
2814 union tcp_log_stackspecific log;
2815 struct timeval tv;
2817 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2818 log.u_bbr.flex8 = flag;
2819 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
2820 log.u_bbr.cur_del_rate = (uintptr_t)prev;
2821 log.u_bbr.delRate = (uintptr_t)rsm;
2822 log.u_bbr.rttProp = (uintptr_t)next;
2823 log.u_bbr.flex7 = 0;
2824 if (prev) {
2825 log.u_bbr.flex1 = prev->r_start;
2826 log.u_bbr.flex2 = prev->r_end;
2827 log.u_bbr.flex7 |= 0x4;
2829 if (rsm) {
2830 log.u_bbr.flex3 = rsm->r_start;
2831 log.u_bbr.flex4 = rsm->r_end;
2832 log.u_bbr.flex7 |= 0x2;
2834 if (next) {
2835 log.u_bbr.flex5 = next->r_start;
2836 log.u_bbr.flex6 = next->r_end;
2837 log.u_bbr.flex7 |= 0x1;
2839 log.u_bbr.applimited = line;
2840 log.u_bbr.pkts_out = th_ack;
2841 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2842 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2843 if (rack->rack_no_prr)
2844 log.u_bbr.lost = 0;
2845 else
2846 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt;
2847 log.u_bbr.bw_inuse = rack->r_ctl.current_round;
2848 log.u_bbr.bw_inuse <<= 32;
2849 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost;
2850 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2851 &rack->rc_inp->inp_socket->so_rcv,
2852 &rack->rc_inp->inp_socket->so_snd,
2853 TCP_LOG_MAPCHG, 0,
2854 0, &log, false, &tv);
2858 static void
2859 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len,
2860 struct rack_sendmap *rsm, int conf)
2862 if (tcp_bblogging_on(tp)) {
2863 union tcp_log_stackspecific log;
2864 struct timeval tv;
2865 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2866 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
2867 log.u_bbr.flex1 = t;
2868 log.u_bbr.flex2 = len;
2869 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt;
2870 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest;
2871 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest;
2872 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt;
2873 log.u_bbr.flex7 = conf;
2874 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot;
2875 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method;
2876 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2877 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt;
2878 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags;
2879 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2880 if (rsm) {
2881 log.u_bbr.pkt_epoch = rsm->r_start;
2882 log.u_bbr.lost = rsm->r_end;
2883 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt;
2884 /* We lose any bits above the lower 24 */
2885 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags;
2886 } else {
2887 /* It's a SYN */
2888 log.u_bbr.pkt_epoch = rack->rc_tp->iss;
2889 log.u_bbr.lost = 0;
2890 log.u_bbr.cwnd_gain = 0;
2891 log.u_bbr.pacing_gain = 0;
2893 /* Write out general bits of interest rrs here */
2894 log.u_bbr.use_lt_bw = rack->rc_highly_buffered;
2895 log.u_bbr.use_lt_bw <<= 1;
2896 log.u_bbr.use_lt_bw |= rack->forced_ack;
2897 log.u_bbr.use_lt_bw <<= 1;
2898 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul;
2899 log.u_bbr.use_lt_bw <<= 1;
2900 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
2901 log.u_bbr.use_lt_bw <<= 1;
2902 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
2903 log.u_bbr.use_lt_bw <<= 1;
2904 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
2905 log.u_bbr.use_lt_bw <<= 1;
2906 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
2907 log.u_bbr.use_lt_bw <<= 1;
2908 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom;
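/*
 * The shifts above pack eight flags into use_lt_bw, one bit each:
 * bit 7 = rc_highly_buffered, bit 6 = forced_ack, bit 5 = rc_gp_dyn_mul,
 * bit 4 = in_probe_rtt, bit 3 = measure_saw_probe_rtt,
 * bit 2 = app_limited_needs_set, bit 1 = rc_gp_filled,
 * bit 0 = rc_dragged_bottom.
 */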
2909 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight;
2910 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts;
2911 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered;
2912 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts;
2913 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt;
2914 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
2915 log.u_bbr.bw_inuse <<= 32;
2916 if (rsm)
2917 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]);
2918 TCP_LOG_EVENTP(tp, NULL,
2919 &rack->rc_inp->inp_socket->so_rcv,
2920 &rack->rc_inp->inp_socket->so_snd,
2921 BBR_LOG_BBRRTT, 0,
2922 0, &log, false, &tv);
2928 static void
2929 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
2932 * Log the rtt sample we are
2933 * applying to the srtt algorithm in
2934 * useconds.
2936 if (tcp_bblogging_on(rack->rc_tp)) {
2937 union tcp_log_stackspecific log;
2938 struct timeval tv;
2940 /* Convert our ms to microseconds */
2941 memset(&log, 0, sizeof(log));
2942 log.u_bbr.flex1 = rtt;
2943 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2944 log.u_bbr.flex7 = 1;
2945 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2946 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2947 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2948 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2949 log.u_bbr.pacing_gain = rack->r_must_retran;
2951 * We capture in delRate the upper 32 bits as
2952 * the confidence level we had declared, and the
2953 * lower 32 bits as the actual RTT using the arrival
2954 * timestamp.
2956 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence;
2957 log.u_bbr.delRate <<= 32;
2958 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt;
2959 /* Let's capture all the things that make up t_rxtcur */
2960 log.u_bbr.applimited = rack_rto_min;
2961 log.u_bbr.epoch = rack_rto_max;
2962 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop;
2963 log.u_bbr.lost = rack_rto_min;
2964 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop);
2965 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp);
2966 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec;
2967 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC;
2968 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec;
2969 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2970 &rack->rc_inp->inp_socket->so_rcv,
2971 &rack->rc_inp->inp_socket->so_snd,
2972 TCP_LOG_RTT, 0,
2973 0, &log, false, &tv);
2977 static void
2978 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where)
2980 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
2981 union tcp_log_stackspecific log;
2982 struct timeval tv;
2984 /* Convert our ms to microseconds */
2985 memset(&log, 0, sizeof(log));
2986 log.u_bbr.flex1 = rtt;
2987 log.u_bbr.flex2 = send_time;
2988 log.u_bbr.flex3 = ack_time;
2989 log.u_bbr.flex4 = where;
2990 log.u_bbr.flex7 = 2;
2991 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2992 log.u_bbr.bw_inuse = rack->r_ctl.current_round;
2993 log.u_bbr.bw_inuse <<= 32;
2994 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost;
2995 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2996 &rack->rc_inp->inp_socket->so_rcv,
2997 &rack->rc_inp->inp_socket->so_snd,
2998 TCP_LOG_RTT, 0,
2999 0, &log, false, &tv);
3004 static void
3005 rack_log_rtt_sendmap(struct tcp_rack *rack, uint32_t idx, uint64_t tsv, uint32_t tsecho)
3007 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
3008 union tcp_log_stackspecific log;
3009 struct timeval tv;
3011 /* Convert our ms to microseconds */
3012 memset(&log, 0, sizeof(log));
3013 log.u_bbr.flex1 = idx;
3014 log.u_bbr.flex2 = rack_ts_to_msec(tsv);
3015 log.u_bbr.flex3 = tsecho;
3016 log.u_bbr.flex7 = 3;
3017 log.u_bbr.rttProp = tsv;
3018 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3019 log.u_bbr.bw_inuse = rack->r_ctl.current_round;
3020 log.u_bbr.bw_inuse <<= 32;
3021 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost;
3022 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3023 &rack->rc_inp->inp_socket->so_rcv,
3024 &rack->rc_inp->inp_socket->so_snd,
3025 TCP_LOG_RTT, 0,
3026 0, &log, false, &tv);
3031 static inline void
3032 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line)
3034 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
3035 union tcp_log_stackspecific log;
3036 struct timeval tv;
3038 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3039 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
3040 log.u_bbr.flex1 = line;
3041 log.u_bbr.flex2 = tick;
3042 log.u_bbr.flex3 = tp->t_maxunacktime;
3043 log.u_bbr.flex4 = tp->t_acktime;
3044 log.u_bbr.flex8 = event;
3045 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3046 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3047 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
3048 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
3049 log.u_bbr.pacing_gain = rack->r_must_retran;
3050 log.u_bbr.bw_inuse = rack->r_ctl.current_round;
3051 log.u_bbr.bw_inuse <<= 32;
3052 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost;
3053 TCP_LOG_EVENTP(tp, NULL,
3054 &rack->rc_inp->inp_socket->so_rcv,
3055 &rack->rc_inp->inp_socket->so_snd,
3056 BBR_LOG_PROGRESS, 0,
3057 0, &log, false, &tv);
3061 static void
3062 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv, int line)
3064 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
3065 union tcp_log_stackspecific log;
3067 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3068 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
3069 log.u_bbr.flex1 = slot;
3070 if (rack->rack_no_prr)
3071 log.u_bbr.flex2 = 0;
3072 else
3073 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt;
3074 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
3075 log.u_bbr.flex6 = line;
3076 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags);
3077 log.u_bbr.flex8 = rack->rc_in_persist;
3078 log.u_bbr.timeStamp = cts;
3079 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3080 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
3081 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
3082 log.u_bbr.pacing_gain = rack->r_must_retran;
3083 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3084 &rack->rc_inp->inp_socket->so_rcv,
3085 &rack->rc_inp->inp_socket->so_snd,
3086 BBR_LOG_BBRSND, 0,
3087 0, &log, false, tv);
3091 static void
3092 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs)
3094 if (tcp_bblogging_on(rack->rc_tp)) {
3095 union tcp_log_stackspecific log;
3096 struct timeval tv;
3098 memset(&log, 0, sizeof(log));
3099 log.u_bbr.flex1 = did_out;
3100 log.u_bbr.flex2 = nxt_pkt;
3101 log.u_bbr.flex3 = way_out;
3102 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
3103 if (rack->rack_no_prr)
3104 log.u_bbr.flex5 = 0;
3105 else
3106 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
3107 log.u_bbr.flex6 = nsegs;
3108 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs;
3109 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */
3110 log.u_bbr.flex7 <<= 1;
3111 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */
3112 log.u_bbr.flex7 <<= 1;
3113 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */
3114 log.u_bbr.flex8 = rack->rc_in_persist;
3115 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
3116 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3117 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3118 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
3119 log.u_bbr.use_lt_bw <<= 1;
3120 log.u_bbr.use_lt_bw |= rack->r_might_revert;
3121 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
3122 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
3123 log.u_bbr.pacing_gain = rack->r_must_retran;
3124 log.u_bbr.bw_inuse = rack->r_ctl.current_round;
3125 log.u_bbr.bw_inuse <<= 32;
3126 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost;
3127 log.u_bbr.epoch = rack->rc_inp->inp_socket->so_snd.sb_hiwat;
3128 log.u_bbr.lt_epoch = rack->rc_inp->inp_socket->so_rcv.sb_hiwat;
3129 log.u_bbr.lost = rack->rc_tp->t_srtt;
3130 log.u_bbr.pkt_epoch = rack->rc_tp->rfbuf_cnt;
3131 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3132 &rack->rc_inp->inp_socket->so_rcv,
3133 &rack->rc_inp->inp_socket->so_snd,
3134 BBR_LOG_DOSEG_DONE, 0,
3135 0, &log, false, &tv);
3139 static void
3140 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm)
3142 if (tcp_bblogging_on(rack->rc_tp)) {
3143 union tcp_log_stackspecific log;
3144 struct timeval tv;
3146 memset(&log, 0, sizeof(log));
3147 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs;
3148 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
3149 log.u_bbr.flex4 = arg1;
3150 log.u_bbr.flex5 = arg2;
3151 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs;
3152 log.u_bbr.flex6 = arg3;
3153 log.u_bbr.flex8 = frm;
3154 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3155 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3156 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
3157 log.u_bbr.applimited = rack->r_ctl.rc_sacked;
3158 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
3159 log.u_bbr.pacing_gain = rack->r_must_retran;
3160 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv,
3161 &tptosocket(tp)->so_snd,
3162 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv);
3166 static void
3167 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot,
3168 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use)
3170 if (tcp_bblogging_on(rack->rc_tp)) {
3171 union tcp_log_stackspecific log;
3172 struct timeval tv;
3174 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3175 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
3176 log.u_bbr.flex1 = slot;
3177 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
3178 log.u_bbr.flex4 = reason;
3179 if (rack->rack_no_prr)
3180 log.u_bbr.flex5 = 0;
3181 else
3182 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
3183 log.u_bbr.flex7 = hpts_calling;
3184 log.u_bbr.flex8 = rack->rc_in_persist;
3185 log.u_bbr.lt_epoch = cwnd_to_use;
3186 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3187 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3188 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
3189 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
3190 log.u_bbr.pacing_gain = rack->r_must_retran;
3191 log.u_bbr.cwnd_gain = rack->rc_has_collapsed;
3192 log.u_bbr.bw_inuse = rack->r_ctl.current_round;
3193 log.u_bbr.bw_inuse <<= 32;
3194 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost;
3195 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3196 &rack->rc_inp->inp_socket->so_rcv,
3197 &rack->rc_inp->inp_socket->so_snd,
3198 BBR_LOG_JUSTRET, 0,
3199 tlen, &log, false, &tv);
3203 static void
3204 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts,
3205 struct timeval *tv, uint32_t flags_on_entry)
3207 if (tcp_bblogging_on(rack->rc_tp)) {
3208 union tcp_log_stackspecific log;
3210 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3211 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
3212 log.u_bbr.flex1 = line;
3213 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to;
3214 log.u_bbr.flex3 = flags_on_entry;
3215 log.u_bbr.flex4 = us_cts;
3216 if (rack->rack_no_prr)
3217 log.u_bbr.flex5 = 0;
3218 else
3219 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
3220 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
3221 log.u_bbr.flex7 = hpts_removed;
3222 log.u_bbr.flex8 = 1;
3223 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags;
3224 log.u_bbr.timeStamp = us_cts;
3225 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3226 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
3227 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
3228 log.u_bbr.pacing_gain = rack->r_must_retran;
3229 log.u_bbr.bw_inuse = rack->r_ctl.current_round;
3230 log.u_bbr.bw_inuse <<= 32;
3231 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost;
3232 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3233 &rack->rc_inp->inp_socket->so_rcv,
3234 &rack->rc_inp->inp_socket->so_snd,
3235 BBR_LOG_TIMERCANC, 0,
3236 0, &log, false, tv);
3240 static void
3241 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
3242 uint32_t flex1, uint32_t flex2,
3243 uint32_t flex3, uint32_t flex4,
3244 uint32_t flex5, uint32_t flex6,
3245 uint16_t flex7, uint8_t mod)
3247 if (tcp_bblogging_on(rack->rc_tp)) {
3248 union tcp_log_stackspecific log;
3249 struct timeval tv;
3251 if (mod == 1) {
3252 /* No, you can't use 1; it's for the real timeout cancel */
3253 return;
3255 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3256 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3257 log.u_bbr.flex1 = flex1;
3258 log.u_bbr.flex2 = flex2;
3259 log.u_bbr.flex3 = flex3;
3260 log.u_bbr.flex4 = flex4;
3261 log.u_bbr.flex5 = flex5;
3262 log.u_bbr.flex6 = flex6;
3263 log.u_bbr.flex7 = flex7;
3264 log.u_bbr.flex8 = mod;
3265 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3266 &rack->rc_inp->inp_socket->so_rcv,
3267 &rack->rc_inp->inp_socket->so_snd,
3268 BBR_LOG_TIMERCANC, 0,
3269 0, &log, false, &tv);
3273 static void
3274 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers)
3276 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
3277 union tcp_log_stackspecific log;
3278 struct timeval tv;
3280 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3281 log.u_bbr.flex1 = timers;
3282 log.u_bbr.flex2 = ret;
3283 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp;
3284 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
3285 log.u_bbr.flex5 = cts;
3286 if (rack->rack_no_prr)
3287 log.u_bbr.flex6 = 0;
3288 else
3289 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt;
3290 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
3291 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
3292 log.u_bbr.pacing_gain = rack->r_must_retran;
3293 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3294 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3295 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3296 &rack->rc_inp->inp_socket->so_rcv,
3297 &rack->rc_inp->inp_socket->so_snd,
3298 BBR_LOG_TO_PROCESS, 0,
3299 0, &log, false, &tv);
3303 static void
3304 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line)
3306 if (tcp_bblogging_on(rack->rc_tp)) {
3307 union tcp_log_stackspecific log;
3308 struct timeval tv;
3310 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3311 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out;
3312 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs;
3313 if (rack->rack_no_prr)
3314 log.u_bbr.flex3 = 0;
3315 else
3316 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt;
3317 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered;
3318 log.u_bbr.flex5 = rack->r_ctl.rc_sacked;
3319 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt;
3320 log.u_bbr.flex7 = line;
3321 log.u_bbr.flex8 = frm;
3322 log.u_bbr.pkts_out = orig_cwnd;
3323 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3324 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3325 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
3326 log.u_bbr.use_lt_bw <<= 1;
3327 log.u_bbr.use_lt_bw |= rack->r_might_revert;
3328 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3329 &rack->rc_inp->inp_socket->so_rcv,
3330 &rack->rc_inp->inp_socket->so_snd,
3331 BBR_LOG_BBRUPD, 0,
3332 0, &log, false, &tv);
3336 static void
3337 rack_counter_destroy(void)
3339 counter_u64_free(rack_total_bytes);
3340 counter_u64_free(rack_fto_send);
3341 counter_u64_free(rack_fto_rsm_send);
3342 counter_u64_free(rack_nfto_resend);
3343 counter_u64_free(rack_hw_pace_init_fail);
3344 counter_u64_free(rack_hw_pace_lost);
3345 counter_u64_free(rack_non_fto_send);
3346 counter_u64_free(rack_extended_rfo);
3347 counter_u64_free(rack_ack_total);
3348 counter_u64_free(rack_express_sack);
3349 counter_u64_free(rack_sack_total);
3350 counter_u64_free(rack_move_none);
3351 counter_u64_free(rack_move_some);
3352 counter_u64_free(rack_sack_attacks_detected);
3353 counter_u64_free(rack_sack_attacks_reversed);
3354 counter_u64_free(rack_sack_attacks_suspect);
3355 counter_u64_free(rack_sack_used_next_merge);
3356 counter_u64_free(rack_sack_used_prev_merge);
3357 counter_u64_free(rack_tlp_tot);
3358 counter_u64_free(rack_tlp_newdata);
3359 counter_u64_free(rack_tlp_retran);
3360 counter_u64_free(rack_tlp_retran_bytes);
3361 counter_u64_free(rack_to_tot);
3362 counter_u64_free(rack_saw_enobuf);
3363 counter_u64_free(rack_saw_enobuf_hw);
3364 counter_u64_free(rack_saw_enetunreach);
3365 counter_u64_free(rack_hot_alloc);
3366 counter_u64_free(rack_to_alloc);
3367 counter_u64_free(rack_to_alloc_hard);
3368 counter_u64_free(rack_to_alloc_emerg);
3369 counter_u64_free(rack_to_alloc_limited);
3370 counter_u64_free(rack_alloc_limited_conns);
3371 counter_u64_free(rack_split_limited);
3372 counter_u64_free(rack_multi_single_eq);
3373 counter_u64_free(rack_rxt_clamps_cwnd);
3374 counter_u64_free(rack_rxt_clamps_cwnd_uniq);
3375 counter_u64_free(rack_proc_non_comp_ack);
3376 counter_u64_free(rack_sack_proc_all);
3377 counter_u64_free(rack_sack_proc_restart);
3378 counter_u64_free(rack_sack_proc_short);
3379 counter_u64_free(rack_sack_skipped_acked);
3380 counter_u64_free(rack_sack_splits);
3381 counter_u64_free(rack_input_idle_reduces);
3382 counter_u64_free(rack_collapsed_win);
3383 counter_u64_free(rack_collapsed_win_rxt);
3384 counter_u64_free(rack_collapsed_win_rxt_bytes);
3385 counter_u64_free(rack_collapsed_win_seen);
3386 counter_u64_free(rack_try_scwnd);
3387 counter_u64_free(rack_persists_sends);
3388 counter_u64_free(rack_persists_acks);
3389 counter_u64_free(rack_persists_loss);
3390 counter_u64_free(rack_persists_lost_ends);
3391 #ifdef INVARIANTS
3392 counter_u64_free(rack_adjust_map_bw);
3393 #endif
3394 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE);
3395 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE);
3398 static struct rack_sendmap *
3399 rack_alloc(struct tcp_rack *rack)
3401 struct rack_sendmap *rsm;
3404 * First get the top of the list; in
3405 * theory it is the "hottest" rsm we have,
3406 * possibly just freed by ack processing.
3408 if (rack->rc_free_cnt > rack_free_cache) {
3409 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
3410 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
3411 counter_u64_add(rack_hot_alloc, 1);
3412 rack->rc_free_cnt--;
3413 return (rsm);
3416 * Once we get under our free cache we probably
3417 * no longer have a "hot" one available. Let's
3418 * get one from UMA.
3420 rsm = uma_zalloc(rack_zone, M_NOWAIT);
3421 if (rsm) {
3422 rack->r_ctl.rc_num_maps_alloced++;
3423 counter_u64_add(rack_to_alloc, 1);
3424 return (rsm);
3427 * Dig into our aux rsms (the last two) since
3428 * UMA failed to get us one.
3430 if (rack->rc_free_cnt) {
3431 counter_u64_add(rack_to_alloc_emerg, 1);
3432 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
3433 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
3434 rack->rc_free_cnt--;
3435 return (rsm);
3437 return (NULL);
3440 static struct rack_sendmap *
3441 rack_alloc_full_limit(struct tcp_rack *rack)
3443 if ((V_tcp_map_entries_limit > 0) &&
3444 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
3445 counter_u64_add(rack_to_alloc_limited, 1);
3446 if (!rack->alloc_limit_reported) {
3447 rack->alloc_limit_reported = 1;
3448 counter_u64_add(rack_alloc_limited_conns, 1);
3450 return (NULL);
3452 return (rack_alloc(rack));
3455 /* wrapper to allocate a sendmap entry, subject to a specific limit */
3456 static struct rack_sendmap *
3457 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type)
3459 struct rack_sendmap *rsm;
3461 if (limit_type) {
3462 /* currently there is only one limit type */
3463 if (rack->r_ctl.rc_split_limit > 0 &&
3464 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) {
3465 counter_u64_add(rack_split_limited, 1);
3466 if (!rack->alloc_limit_reported) {
3467 rack->alloc_limit_reported = 1;
3468 counter_u64_add(rack_alloc_limited_conns, 1);
3470 return (NULL);
3474 /* allocate and mark in the limit type, if set */
3475 rsm = rack_alloc(rack);
3476 if (rsm != NULL && limit_type) {
3477 rsm->r_limit_type = limit_type;
3478 rack->r_ctl.rc_num_split_allocs++;
3480 return (rsm);
3483 static void
3484 rack_free_trim(struct tcp_rack *rack)
3486 struct rack_sendmap *rsm;
3489 * Free up all the tail entries until
3490 * we get our list down to the limit.
3492 while (rack->rc_free_cnt > rack_free_cache) {
3493 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head);
3494 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
3495 rack->rc_free_cnt--;
3496 rack->r_ctl.rc_num_maps_alloced--;
3497 uma_zfree(rack_zone, rsm);
3501 static void
3502 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
3504 if (rsm->r_flags & RACK_APP_LIMITED) {
3505 if (rack->r_ctl.rc_app_limited_cnt > 0) {
3506 rack->r_ctl.rc_app_limited_cnt--;
3509 if (rsm->r_limit_type) {
3510 /* currently there is only one limit type */
3511 rack->r_ctl.rc_num_split_allocs--;
3513 if (rsm == rack->r_ctl.rc_first_appl) {
3514 rack->r_ctl.cleared_app_ack_seq = rsm->r_start + (rsm->r_end - rsm->r_start);
3515 rack->r_ctl.cleared_app_ack = 1;
3516 if (rack->r_ctl.rc_app_limited_cnt == 0)
3517 rack->r_ctl.rc_first_appl = NULL;
3518 else
3519 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl);
3521 if (rsm == rack->r_ctl.rc_resend)
3522 rack->r_ctl.rc_resend = NULL;
3523 if (rsm == rack->r_ctl.rc_end_appl)
3524 rack->r_ctl.rc_end_appl = NULL;
3525 if (rack->r_ctl.rc_tlpsend == rsm)
3526 rack->r_ctl.rc_tlpsend = NULL;
3527 if (rack->r_ctl.rc_sacklast == rsm)
3528 rack->r_ctl.rc_sacklast = NULL;
3529 memset(rsm, 0, sizeof(struct rack_sendmap));
3530 /* Make sure we are not going to overrun our count limit of 0xff */
3531 if ((rack->rc_free_cnt + 1) > RACK_FREE_CNT_MAX) {
3532 rack_free_trim(rack);
3534 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext);
3535 rack->rc_free_cnt++;
3538 static uint32_t
3539 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack)
3541 uint64_t srtt, bw, len, tim;
3542 uint32_t segsiz, def_len, minl;
3544 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
3545 def_len = rack_def_data_window * segsiz;
3546 if (rack->rc_gp_filled == 0) {
3548 * We have no measurement (IW is in flight?) so
3549 * we can only guess using our data_window sysctl
3550 * value (usually 20MSS).
3552 return (def_len);
3555 * Now we have a number of factors to consider.
3557 * 1) We have a desired BDP multiple (rack_goal_bdp), usually
3558 * at least 2.
3559 * 2) We have a minimum number of rtt's, usually 1 SRTT,
3560 * but we allow it to be more.
3561 * 3) We want to make sure a measurement lasts N useconds (if
3562 * we have set rack_min_measure_usec).
3564 * We handle the first concern here by trying to create a data
3565 * window of max(rack_def_data_window, DesiredBDP). The
3566 * second concern we handle in not letting the measurement
3567 * window end normally until at least the required SRTT's
3568 * have gone by which is done further below in
3569 * rack_enough_for_measurement(). Finally the third concern
3570 * we also handle here by calculating how long that time
3571 * would take at the current BW and then return the
3572 * max of our first calculation and that length. Note
3573 * that if rack_min_measure_usec is 0, we don't deal
3574 * with concern 3. Also for both Concern 1 and 3 an
3575 * application limited period could end the measurement
3576 * earlier.
3578 * So let's calculate the BDP with the "known" b/w using
3579 * the SRTT as our rtt and then multiply it by the
3580 * goal.
3582 bw = rack_get_bw(rack);
3583 srtt = (uint64_t)tp->t_srtt;
3584 len = bw * srtt;
3585 len /= (uint64_t)HPTS_USEC_IN_SEC;
3586 len *= max(1, rack_goal_bdp);
3587 /* Now we need to round up to the nearest MSS */
3588 len = roundup(len, segsiz);
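/*
 * Illustrative example of the calculation above (made-up numbers,
 * not defaults): with bw = 12,500,000 bytes/sec (~100 Mbps),
 * srtt = 20,000 usec and rack_goal_bdp = 2, one BDP is
 * 12,500,000 * 20,000 / 1,000,000 = 250,000 bytes, so len becomes
 * 500,000 and rounds up to 500,780 with a 1460 byte segsiz.
 */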
3589 if (rack_min_measure_usec) {
3590 /* Now calculate our min length for this b/w */
3591 tim = rack_min_measure_usec;
3592 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC;
3593 if (minl == 0)
3594 minl = 1;
3595 minl = roundup(minl, segsiz);
3596 if (len < minl)
3597 len = minl;
3600 * Now if we have a very small window we want
3601 * to attempt to get the window that is
3602 * as small as possible. This happens on
3603 * low b/w connections and we don't want to
3604 * span huge numbers of rtt's between measurements.
3606 * We basically include 2 over our "MIN window" so
3607 * that the measurement can be shortened (possibly) by
3608 * an ack'ed packet.
3610 if (len < def_len)
3611 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz)));
3612 else
3613 return (max((uint32_t)len, def_len));
3617 static int
3618 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality)
3620 uint32_t tim, srtts, segsiz;
3623 * Has enough time passed for the GP measurement to be valid?
3625 if (SEQ_LT(th_ack, tp->gput_seq)) {
3626 /* Not enough bytes yet */
3627 return (0);
3629 if ((tp->snd_max == tp->snd_una) ||
3630 (th_ack == tp->snd_max)){
3632 * All is acked; the quality of an all-acked measurement is
3633 * usually low or medium, but in theory we could split
3634 * all-acked into two cases: where you got
3635 * a significant amount of your window and
3636 * where you did not. For now we leave it,
3637 * but it is something to contemplate in the
3638 * future. The danger here is that delayed ack
3639 * is affecting the last byte (which is a 50:50 chance).
3641 *quality = RACK_QUALITY_ALLACKED;
3642 return (1);
3644 if (SEQ_GEQ(th_ack, tp->gput_ack)) {
3646 * We obtained our entire window of data we wanted;
3647 * no matter if we are in recovery or not, it's
3648 * ok since expanding the window does not
3649 * make things fuzzy (or at least not as much).
3651 *quality = RACK_QUALITY_HIGH;
3652 return (1);
3654 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
3655 if (SEQ_LT(th_ack, tp->gput_ack) &&
3656 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
3657 /* Not enough bytes yet */
3658 return (0);
3660 if (rack->r_ctl.rc_first_appl &&
3661 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) {
3663 * We are up to the app limited send point;
3664 * we have to measure irrespective of the time.
3666 *quality = RACK_QUALITY_APPLIMITED;
3667 return (1);
3669 /* Now what about time? */
3670 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts);
3671 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts;
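/*
 * For example, if rc_gp_srtt were 30,000 usec and rack_min_srtts
 * were 1, at least 30,000 usec must have elapsed since gput_ts
 * (and we must not be in recovery) for the check below to pass.
 */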
3672 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) {
3674 * We do not allow a measurement if we are in recovery,
3675 * since that would shrink the goodput window we wanted.
3676 * This is to prevent cloudiness about when the last send
3677 * was actually made.
3679 *quality = RACK_QUALITY_HIGH;
3680 return (1);
3682 /* Nope not even a full SRTT has passed */
3683 return (0);
3686 static void
3687 rack_log_timely(struct tcp_rack *rack,
3688 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd,
3689 uint64_t up_bnd, int line, uint8_t method)
3691 if (tcp_bblogging_on(rack->rc_tp)) {
3692 union tcp_log_stackspecific log;
3693 struct timeval tv;
3695 memset(&log, 0, sizeof(log));
3696 log.u_bbr.flex1 = logged;
3697 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt;
3698 log.u_bbr.flex2 <<= 4;
3699 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt;
3700 log.u_bbr.flex2 <<= 4;
3701 log.u_bbr.flex2 |= rack->rc_gp_incr;
3702 log.u_bbr.flex2 <<= 4;
3703 log.u_bbr.flex2 |= rack->rc_gp_bwred;
3704 log.u_bbr.flex3 = rack->rc_gp_incr;
3705 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
3706 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca;
3707 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec;
3708 log.u_bbr.flex7 = rack->rc_gp_bwred;
3709 log.u_bbr.flex8 = method;
3710 log.u_bbr.cur_del_rate = cur_bw;
3711 log.u_bbr.delRate = low_bnd;
3712 log.u_bbr.bw_inuse = up_bnd;
3713 log.u_bbr.rttProp = rack_get_bw(rack);
3714 log.u_bbr.pkt_epoch = line;
3715 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
3716 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3717 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3718 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
3719 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
3720 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom;
3721 log.u_bbr.cwnd_gain <<= 1;
3722 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec;
3723 log.u_bbr.cwnd_gain <<= 1;
3724 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
3725 log.u_bbr.cwnd_gain <<= 1;
3726 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
3727 log.u_bbr.lost = rack->r_ctl.rc_loss_count;
3728 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3729 &rack->rc_inp->inp_socket->so_rcv,
3730 &rack->rc_inp->inp_socket->so_snd,
3731 TCP_TIMELY_WORK, 0,
3732 0, &log, false, &tv);
3736 static int
3737 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult)
3740 * Before we increase we need to know if
3741 * the estimate just made was less than
3742 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est)
3744 * If we already are pacing at a fast enough
3745 * rate to push us faster, there is no sense in
3746 * increasing.
3748 * We first calculate our actual pacing rate (ss or ca multiplier
3749 * times our cur_bw).
3751 * Then we take the last measured rate and multiply it by our
3752 * maximum pacing overage to give us a max allowable rate.
3754 * If our act_rate is smaller than our max_allowable rate
3755 * then we should increase. Else we should hold steady.
3758 uint64_t act_rate, max_allow_rate;
3760 if (rack_timely_no_stopping)
3761 return (1);
3763 if ((cur_bw == 0) || (last_bw_est == 0)) {
3765 * Initial startup case or
3766 * everything is acked case.
3768 rack_log_timely(rack, mult, cur_bw, 0, 0,
3769 __LINE__, 9);
3770 return (1);
3772 if (mult <= 100) {
3774 * We can always pace at or slightly above our rate.
3776 rack_log_timely(rack, mult, cur_bw, 0, 0,
3777 __LINE__, 9);
3778 return (1);
3780 act_rate = cur_bw * (uint64_t)mult;
3781 act_rate /= 100;
3782 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100);
3783 max_allow_rate /= 100;
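/*
 * Illustrative numbers: with cur_bw = 1,000,000 B/s and mult = 150,
 * act_rate is 1,500,000 B/s. With rack_max_per_above = 10 and
 * last_bw_est = 1,400,000 B/s, max_allow_rate is 1,540,000 B/s, so
 * we would still raise; had last_bw_est been 1,300,000 B/s the cap
 * would be 1,430,000 B/s and we would hold steady instead.
 */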
3784 if (act_rate < max_allow_rate) {
3786 * Here the rate we are actually pacing at
3787 * is smaller than 10% above our last measurement.
3788 * This means we are pacing below what we would
3789 * like to try to achieve (plus some wiggle room).
3791 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
3792 __LINE__, 9);
3793 return (1);
3794 } else {
3796 * Here we are already pacing at least rack_max_per_above (10%)
3797 * above what we are getting back. This most likely indicates
3798 * that we are being limited (cwnd/rwnd/app) and can't
3799 * get any more b/w. There is no sense in trying to
3800 * raise the pacing rate; it's not speeding us up
3801 * and we already are pacing faster than we are getting.
3803 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
3804 __LINE__, 8);
3805 return (0);
3809 static void
3810 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack)
3813 * When we drag bottom, we want to ensure
3814 * that no multiplier is below 1.0; if one is,
3815 * we want to restore it to at least that.
3817 if (rack->r_ctl.rack_per_of_gp_rec < 100) {
3818 /* This is unlikely; we usually do not touch recovery */
3819 rack->r_ctl.rack_per_of_gp_rec = 100;
3821 if (rack->r_ctl.rack_per_of_gp_ca < 100) {
3822 rack->r_ctl.rack_per_of_gp_ca = 100;
3824 if (rack->r_ctl.rack_per_of_gp_ss < 100) {
3825 rack->r_ctl.rack_per_of_gp_ss = 100;
3829 static void
3830 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack)
3832 if (rack->r_ctl.rack_per_of_gp_ca > 100) {
3833 rack->r_ctl.rack_per_of_gp_ca = 100;
3835 if (rack->r_ctl.rack_per_of_gp_ss > 100) {
3836 rack->r_ctl.rack_per_of_gp_ss = 100;
3840 static void
3841 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override)
3843 int32_t calc, logged, plus;
3845 logged = 0;
3847 if (rack->rc_skip_timely)
3848 return;
3849 if (override) {
3851 * override is passed when we are
3852 * losing b/w and making one last
3853 * gasp at trying to not lose out
3854 * to a new-reno flow.
3856 goto extra_boost;
3858 /* In classic timely we boost by 5x if we have 5 increases in a row; let's not */
3859 if (rack->rc_gp_incr &&
3860 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) {
3862 * Reset and get 5 strokes more before the boost. Note
3863 * that the count is 0 based so we have to add one.
3865 extra_boost:
3866 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST;
3867 rack->rc_gp_timely_inc_cnt = 0;
3868 } else
3869 plus = (uint32_t)rack_gp_increase_per;
3870 /* Must be at least 1% increase for true timely increases */
3871 if ((plus < 1) &&
3872 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0)))
3873 plus = 1;
3874 if (rack->rc_gp_saw_rec &&
3875 (rack->rc_gp_no_rec_chg == 0) &&
3876 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3877 rack->r_ctl.rack_per_of_gp_rec)) {
3878 /* We have been in recovery ding it too */
3879 calc = rack->r_ctl.rack_per_of_gp_rec + plus;
3880 if (calc > 0xffff)
3881 calc = 0xffff;
3882 logged |= 1;
3883 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc;
3884 if (rack->r_ctl.rack_per_upper_bound_ca &&
3885 (rack->rc_dragged_bottom == 0) &&
3886 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca))
3887 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca;
3889 if (rack->rc_gp_saw_ca &&
3890 (rack->rc_gp_saw_ss == 0) &&
3891 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3892 rack->r_ctl.rack_per_of_gp_ca)) {
3893 /* In CA */
3894 calc = rack->r_ctl.rack_per_of_gp_ca + plus;
3895 if (calc > 0xffff)
3896 calc = 0xffff;
3897 logged |= 2;
3898 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc;
3899 if (rack->r_ctl.rack_per_upper_bound_ca &&
3900 (rack->rc_dragged_bottom == 0) &&
3901 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca))
3902 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca;
3904 if (rack->rc_gp_saw_ss &&
3905 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3906 rack->r_ctl.rack_per_of_gp_ss)) {
3907 /* In SS */
3908 calc = rack->r_ctl.rack_per_of_gp_ss + plus;
3909 if (calc > 0xffff)
3910 calc = 0xffff;
3911 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc;
3912 if (rack->r_ctl.rack_per_upper_bound_ss &&
3913 (rack->rc_dragged_bottom == 0) &&
3914 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss))
3915 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss;
3916 logged |= 4;
3918 if (logged &&
3919 (rack->rc_gp_incr == 0)){
3920 /* Go into increment mode */
3921 rack->rc_gp_incr = 1;
3922 rack->rc_gp_timely_inc_cnt = 0;
3924 if (rack->rc_gp_incr &&
3925 logged &&
3926 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) {
3927 rack->rc_gp_timely_inc_cnt++;
3929 rack_log_timely(rack, logged, plus, 0, 0,
3930 __LINE__, 1);
3933 static uint32_t
3934 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff)
3937 * norm_grad = rtt_diff / minrtt;
3938 * new_per = curper * (1 - B * norm_grad)
3940 * B = rack_gp_decrease_per (default 80%)
3941 * rtt_diff = input var current rtt-diff
3942 * curper = input var current percentage
3943 * minrtt = from rack filter
3945 * In order to do the floating point calculations above we
3946 * do an integer conversion. The code looks confusing so let me
3947 * translate it into something that uses more variables and
3948 * is clearer for us humans :)
3950 * uint64_t norm_grad, inverse, reduce_by, final_result;
3951 * uint32_t perf;
3953 * norm_grad = (((uint64_t)rtt_diff * 1000000) /
3954 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt));
3955 * inverse = ((uint64_t)rack_gp_decrease * (uint64_t)1000000) * norm_grad;
3956 * inverse /= 1000000;
3957 * reduce_by = (1000000 - inverse);
3958 * final_result = (cur_per * reduce_by) / 1000000;
3959 * perf = (uint32_t)final_result;
3961 uint64_t perf;
3963 perf = (((uint64_t)curper * ((uint64_t)1000000 -
3964 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 *
3965 (((uint64_t)rtt_diff * (uint64_t)1000000)/
3966 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/
3967 (uint64_t)1000000)) /
3968 (uint64_t)1000000);
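/*
 * Worked example of the math above (illustrative values): with
 * curper = 200, rtt_diff = 10,000 usec, a filtered min rtt of
 * 40,000 usec and rack_gp_decrease_per = 80, norm_grad is 0.25,
 * the reduction factor is 1 - (0.80 * 0.25) = 0.80, and perf
 * comes out to 160.
 */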
3969 if (perf > curper) {
3970 /* TSNH */
3971 perf = curper - 1;
3973 return ((uint32_t)perf);
3976 static uint32_t
3977 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt)
3980 * result = curper * (1 - B * (1 - highrttthresh / gp_srtt))
3984 * B = rack_gp_decrease_per (default .8 i.e. 80)
3985 * highrttthresh = filter_min * rack_gp_rtt_maxmul
3987 uint64_t perf;
3988 uint32_t highrttthresh;
3990 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
3992 perf = (((uint64_t)curper * ((uint64_t)1000000 -
3993 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 -
3994 ((uint64_t)highrttthresh * (uint64_t)1000000) /
3995 (uint64_t)rtt)) / 100)) /(uint64_t)1000000);
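/*
 * Illustrative example (values chosen for the arithmetic, not
 * defaults): with a filtered min rtt of 10,000 usec and
 * rack_gp_rtt_maxmul = 3, highrttthresh is 30,000 usec. If the
 * measured rtt is 60,000 usec and rack_gp_decrease_per is 80, the
 * reduction factor is 1 - 0.80 * (1 - 0.5) = 0.60, so curper = 200
 * becomes 120.
 */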
3996 if (tcp_bblogging_on(rack->rc_tp)) {
3997 uint64_t log1;
3999 log1 = rtt;
4000 log1 <<= 32;
4001 log1 |= highrttthresh;
4002 rack_log_timely(rack,
4003 rack_gp_decrease_per,
4004 (uint64_t)curper,
4005 log1,
4006 perf,
4007 __LINE__,
4008 15);
4010 return (perf);
4013 static void
4014 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff)
4016 uint64_t logvar, logvar2, logvar3;
4017 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val;
4019 if (rack->rc_skip_timely)
4020 return;
4021 if (rack->rc_gp_incr) {
4022 /* Turn off increment counting */
4023 rack->rc_gp_incr = 0;
4024 rack->rc_gp_timely_inc_cnt = 0;
4026 ss_red = ca_red = rec_red = 0;
4027 logged = 0;
4028 /* Calculate the reduction value */
4029 if (rtt_diff < 0) {
4030 rtt_diff *= -1;
4032 /* Must be at least 1% reduction */
4033 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) {
4034 /* We have been in recovery ding it too */
4035 if (timely_says == 2) {
4036 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt);
4037 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
4038 if (alt < new_per)
4039 val = alt;
4040 else
4041 val = new_per;
4042 } else
4043 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
4044 if (rack->r_ctl.rack_per_of_gp_rec > val) {
4045 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val);
4046 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val;
4047 } else {
4048 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
4049 rec_red = 0;
4051 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec)
4052 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
4053 logged |= 1;
4055 if (rack->rc_gp_saw_ss) {
4056 /* Sent in SS */
4057 if (timely_says == 2) {
4058 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt);
4059 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
4060 if (alt < new_per)
4061 val = alt;
4062 else
4063 val = new_per;
4064 } else
4065 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
4066 if (rack->r_ctl.rack_per_of_gp_ss > new_per) {
4067 ss_red = rack->r_ctl.rack_per_of_gp_ss - val;
4068 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val;
4069 } else {
4070 ss_red = new_per;
4071 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
4072 logvar = new_per;
4073 logvar <<= 32;
4074 logvar |= alt;
4075 logvar2 = (uint32_t)rtt;
4076 logvar2 <<= 32;
4077 logvar2 |= (uint32_t)rtt_diff;
4078 logvar3 = rack_gp_rtt_maxmul;
4079 logvar3 <<= 32;
4080 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
4081 rack_log_timely(rack, timely_says,
4082 logvar2, logvar3,
4083 logvar, __LINE__, 10);
4085 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss)
4086 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
4087 logged |= 4;
4088 } else if (rack->rc_gp_saw_ca) {
4089 /* Sent in CA */
4090 if (timely_says == 2) {
4091 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt);
4092 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
4093 if (alt < new_per)
4094 val = alt;
4095 else
4096 val = new_per;
4097 } else
4098 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
4099 if (rack->r_ctl.rack_per_of_gp_ca > val) {
4100 ca_red = rack->r_ctl.rack_per_of_gp_ca - val;
4101 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val;
4102 } else {
4103 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
4104 ca_red = 0;
4105 logvar = new_per;
4106 logvar <<= 32;
4107 logvar |= alt;
4108 logvar2 = (uint32_t)rtt;
4109 logvar2 <<= 32;
4110 logvar2 |= (uint32_t)rtt_diff;
4111 logvar3 = rack_gp_rtt_maxmul;
4112 logvar3 <<= 32;
4113 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
4114 rack_log_timely(rack, timely_says,
4115 logvar2, logvar3,
4116 logvar, __LINE__, 10);
4118 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca)
4119 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
4120 logged |= 2;
4122 if (rack->rc_gp_timely_dec_cnt < 0x7) {
4123 rack->rc_gp_timely_dec_cnt++;
4124 if (rack_timely_dec_clear &&
4125 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear))
4126 rack->rc_gp_timely_dec_cnt = 0;
4128 logvar = ss_red;
4129 logvar <<= 32;
4130 logvar |= ca_red;
4131 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar,
4132 __LINE__, 2);
4135 static void
4136 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts,
4137 uint32_t rtt, uint32_t line, uint8_t reas)
4139 if (tcp_bblogging_on(rack->rc_tp)) {
4140 union tcp_log_stackspecific log;
4141 struct timeval tv;
4143 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
4144 log.u_bbr.flex1 = line;
4145 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts;
4146 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts;
4147 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
4148 log.u_bbr.flex5 = rtt;
4149 log.u_bbr.flex6 = rack->rc_highly_buffered;
4150 log.u_bbr.flex6 <<= 1;
4151 log.u_bbr.flex6 |= rack->forced_ack;
4152 log.u_bbr.flex6 <<= 1;
4153 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul;
4154 log.u_bbr.flex6 <<= 1;
4155 log.u_bbr.flex6 |= rack->in_probe_rtt;
4156 log.u_bbr.flex6 <<= 1;
4157 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt;
4158 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt;
4159 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca;
4160 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec;
4161 log.u_bbr.flex8 = reas;
4162 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
4163 log.u_bbr.delRate = rack_get_bw(rack);
4164 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt;
4165 log.u_bbr.cur_del_rate <<= 32;
4166 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt;
4167 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered;
4168 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
4169 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
4170 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
4171 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
4172 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts;
4173 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight;
4174 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
4175 log.u_bbr.rttProp = us_cts;
4176 log.u_bbr.rttProp <<= 32;
4177 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt;
4178 TCP_LOG_EVENTP(rack->rc_tp, NULL,
4179 &rack->rc_inp->inp_socket->so_rcv,
4180 &rack->rc_inp->inp_socket->so_snd,
4181 BBR_LOG_RTT_SHRINKS, 0,
4182 0, &log, false, &rack->r_ctl.act_rcv_time);
4186 static void
4187 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt)
4189 uint64_t bwdp;
4191 bwdp = rack_get_bw(rack);
4192 bwdp *= (uint64_t)rtt;
4193 bwdp /= (uint64_t)HPTS_USEC_IN_SEC;
4194 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz);
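/*
 * For example (illustrative numbers): bw = 2,500,000 bytes/sec and
 * rtt = 40,000 usec give a bwdp of 100,000 bytes, which rounds up
 * to 100,740 with a 1460 byte segsiz; the check below then enforces
 * a floor of rack_timely_min_segs full segments.
 */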
4195 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) {
4197 * A window protocol must be able to have 4 packets
4198 * outstanding as the floor in order to function
4199 * (especially considering delayed ack :D).
4201 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs);
4205 static void
4206 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts)
4209 * ProbeRTT is a bit different in rack_pacing than in
4210 * BBR. It is like BBR in that it uses the lowering of
4211 * the RTT as a signal that we saw something new, and
4212 * counts from there to decide how long to wait between
4213 * probes. But it is different in that it's quite simple. It does
4214 * not play with the cwnd, wait until we get down
4215 * to N segments outstanding, and hold that for
4216 * 200ms. Instead it just sets the pacing reduction
4217 * rate to a set percentage (70 by default) and holds
4218 * that for a number of recent GP Srtt's.
4220 uint32_t segsiz;
4222 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
4223 if (rack->rc_gp_dyn_mul == 0)
4224 return;
4226 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) {
4227 /* We are idle */
4228 return;
4230 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
4231 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
4233 * Stop the goodput now; the idea here is
4234 * that future measurements with in_probe_rtt
4235 * won't register if they are not greater, so
4236 * we want to get what info (if any) is available
4237 * now.
4239 rack_do_goodput_measurement(rack->rc_tp, rack,
4240 rack->rc_tp->snd_una, __LINE__,
4241 RACK_QUALITY_PROBERTT);
4243 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
4244 rack->r_ctl.rc_time_probertt_entered = us_cts;
4245 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
4246 rack->r_ctl.rc_pace_min_segs);
4247 rack->in_probe_rtt = 1;
4248 rack->measure_saw_probe_rtt = 1;
4249 rack->r_ctl.rc_time_probertt_starts = 0;
4250 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt;
4251 if (rack_probertt_use_min_rtt_entry)
4252 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
4253 else
4254 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt);
4255 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4256 __LINE__, RACK_RTTS_ENTERPROBE);
4259 static void
4260 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts)
4262 struct rack_sendmap *rsm;
4263 uint32_t segsiz;
4265 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
4266 rack->r_ctl.rc_pace_min_segs);
4267 rack->in_probe_rtt = 0;
4268 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
4269 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
4271 * Stop the goodput now; the idea here is
4272 * that future measurements with in_probe_rtt
4273 * won't register if they are not greater, so
4274 * we want to get what info (if any) is available
4275 * now.
4277 rack_do_goodput_measurement(rack->rc_tp, rack,
4278 rack->rc_tp->snd_una, __LINE__,
4279 RACK_QUALITY_PROBERTT);
4280 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
4282 * We don't have enough data to make a measurement.
4283 * So let's just stop and start here after exiting
4284 * probe-rtt. We probably are not interested in
4285 * the results anyway.
4287 rack->rc_tp->t_flags &= ~TF_GPUTINPROG;
4290 * Measurements through the current snd_max are going
4291 * to be limited by the slower pacing rate.
4293 * We need to mark these as app-limited so we
4294 * don't collapse the b/w.
4296 rsm = tqhash_max(rack->r_ctl.tqh);
4297 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
4298 if (rack->r_ctl.rc_app_limited_cnt == 0)
4299 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
4300 else {
4302 * Go out to the end of the app limited list, mark
4303 * this new one as next, and move the end_appl up
4304 * to this guy.
4306 if (rack->r_ctl.rc_end_appl)
4307 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
4308 rack->r_ctl.rc_end_appl = rsm;
4310 rsm->r_flags |= RACK_APP_LIMITED;
4311 rack->r_ctl.rc_app_limited_cnt++;
4314 * Now, we need to examine our pacing rate multipliers.
4315 * If it's under 100%, we need to kick it back up to
4316 * 100%. We also don't let it be over our "max" above
4317 * the actual rate i.e. 100% + rack_clamp_atexit_prtt.
4318 * Note setting clamp_atexit_prtt to 0 has the effect
4319 * of setting CA/SS to 100% always at exit (which is
4320 * the default behavior).
4322 if (rack_probertt_clear_is) {
4323 rack->rc_gp_incr = 0;
4324 rack->rc_gp_bwred = 0;
4325 rack->rc_gp_timely_inc_cnt = 0;
4326 rack->rc_gp_timely_dec_cnt = 0;
4328 /* Do we do any clamping at exit? */
4329 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) {
4330 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp;
4331 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp;
4333 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) {
4334 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt;
4335 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt;
4338 * Let's set rtt_diff to 0, so that we will get a "boost"
4339 * after exiting.
4341 rack->r_ctl.rc_rtt_diff = 0;
4343 /* Clear all flags so we start fresh */
4344 rack->rc_tp->t_bytes_acked = 0;
4345 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND;
4347 * If configured to, set the cwnd and ssthresh to
4348 * our targets.
4350 if (rack_probe_rtt_sets_cwnd) {
4351 uint64_t ebdp;
4352 uint32_t setto;
4354 /* Set ssthresh so we get into CA once we hit our target */
4355 if (rack_probertt_use_min_rtt_exit == 1) {
4356 /* Set to min rtt */
4357 rack_set_prtt_target(rack, segsiz,
4358 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
4359 } else if (rack_probertt_use_min_rtt_exit == 2) {
4360 /* Set to current gp rtt */
4361 rack_set_prtt_target(rack, segsiz,
4362 rack->r_ctl.rc_gp_srtt);
4363 } else if (rack_probertt_use_min_rtt_exit == 3) {
4364 /* Set to entry gp rtt */
4365 rack_set_prtt_target(rack, segsiz,
4366 rack->r_ctl.rc_entry_gp_rtt);
4367 } else {
4368 uint64_t sum;
4369 uint32_t setval;
4371 sum = rack->r_ctl.rc_entry_gp_rtt;
4372 sum *= 10;
4373 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt));
4374 if (sum >= 20) {
4376 * A highly buffered path needs
4377 * cwnd space for timely to work.
4378 * Let's set things up as if
4379 * we are heading back here again.
4381 setval = rack->r_ctl.rc_entry_gp_rtt;
4382 } else if (sum >= 15) {
4384 * Let's take the smaller of the
4385 * two since we are just somewhat
4386 * buffered.
4388 setval = rack->r_ctl.rc_gp_srtt;
4389 if (setval > rack->r_ctl.rc_entry_gp_rtt)
4390 setval = rack->r_ctl.rc_entry_gp_rtt;
4391 } else {
4393 * Here we are not highly buffered
4394 * and should pick the min we can to
4395 * keep from causing loss.
4397 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
4399 rack_set_prtt_target(rack, segsiz,
4400 setval);
4402 if (rack_probe_rtt_sets_cwnd > 1) {
4403 /* There is a percentage here to boost */
4404 ebdp = rack->r_ctl.rc_target_probertt_flight;
4405 ebdp *= rack_probe_rtt_sets_cwnd;
4406 ebdp /= 100;
4407 setto = rack->r_ctl.rc_target_probertt_flight + ebdp;
4408 } else
4409 setto = rack->r_ctl.rc_target_probertt_flight;
4410 rack->rc_tp->snd_cwnd = roundup(setto, segsiz);
4411 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) {
4412 /* Enforce a min */
4413 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs;
4415 /* If we set in the cwnd also set the ssthresh point so we are in CA */
4416 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1);
4418 rack_log_rtt_shrinks(rack, us_cts,
4419 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4420 __LINE__, RACK_RTTS_EXITPROBE);
4421 /* Clear times last so log has all the info */
4422 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max;
4423 rack->r_ctl.rc_time_probertt_entered = us_cts;
4424 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
4425 rack->r_ctl.rc_time_of_last_probertt = us_cts;
4428 static void
4429 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts)
4431 /* Check in on probe-rtt */
4433 if (rack->rc_gp_filled == 0) {
4434 /* We do not do p-rtt unless we have gp measurements */
4435 return;
4437 if (rack->in_probe_rtt) {
4438 uint64_t no_overflow;
4439 uint32_t endtime, must_stay;
4441 if (rack->r_ctl.rc_went_idle_time &&
4442 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) {
4444 * We went idle during prtt, just exit now.
4446 rack_exit_probertt(rack, us_cts);
4447 } else if (rack_probe_rtt_safety_val &&
4448 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) &&
4449 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) {
4451 * Probe RTT safety value triggered!
4453 rack_log_rtt_shrinks(rack, us_cts,
4454 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4455 __LINE__, RACK_RTTS_SAFETY);
4456 rack_exit_probertt(rack, us_cts);
4458 /* Calculate the max we will wait */
4459 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait);
4460 if (rack->rc_highly_buffered)
4461 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp);
4462 /* Calculate the min we must wait */
4463 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain);
4464 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) &&
4465 TSTMP_LT(us_cts, endtime)) {
4466 uint32_t calc;
4467 /* Do we lower more? */
4468 no_exit:
4469 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered))
4470 calc = us_cts - rack->r_ctl.rc_time_probertt_entered;
4471 else
4472 calc = 0;
4473 calc /= max(rack->r_ctl.rc_gp_srtt, 1);
4474 if (calc) {
4475 /* Maybe */
4476 calc *= rack_per_of_gp_probertt_reduce;
4477 if (calc > rack_per_of_gp_probertt)
4478 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh;
4479 else
4480 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc;
4481 /* Limit it too */
4482 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh)
4483 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh;
4485 /* We must reach target or the time set */
4486 return;
4488 if (rack->r_ctl.rc_time_probertt_starts == 0) {
4489 if ((TSTMP_LT(us_cts, must_stay) &&
4490 rack->rc_highly_buffered) ||
4491 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) >
4492 rack->r_ctl.rc_target_probertt_flight)) {
4493 /* We are not past the must_stay time */
4494 goto no_exit;
4496 rack_log_rtt_shrinks(rack, us_cts,
4497 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4498 __LINE__, RACK_RTTS_REACHTARGET);
4499 rack->r_ctl.rc_time_probertt_starts = us_cts;
4500 if (rack->r_ctl.rc_time_probertt_starts == 0)
4501 rack->r_ctl.rc_time_probertt_starts = 1;
4502 /* Restore back to our rate we want to pace at in prtt */
4503 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
4506 * Setup our end time, some number of gp_srtts plus 200ms.
4508 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt *
4509 (uint64_t)rack_probertt_gpsrtt_cnt_mul);
4510 if (rack_probertt_gpsrtt_cnt_div)
4511 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div);
4512 else
4513 endtime = 0;
4514 endtime += rack_min_probertt_hold;
4515 endtime += rack->r_ctl.rc_time_probertt_starts;
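/*
 * Illustrative example (made-up settings): with rc_gp_srtt =
 * 50,000 usec, rack_probertt_gpsrtt_cnt_mul = 3 and
 * rack_probertt_gpsrtt_cnt_div = 2, the srtt portion is 75,000
 * usec; adding the roughly 200ms hold noted above means we exit
 * probe-rtt about 275,000 usec after rc_time_probertt_starts.
 */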
4516 if (TSTMP_GEQ(us_cts, endtime)) {
4517 /* yes, exit probertt */
4518 rack_exit_probertt(rack, us_cts);
4521 } else if ((rack->rc_skip_timely == 0) &&
4522 (TSTMP_GT(us_cts, rack->r_ctl.rc_lower_rtt_us_cts)) &&
4523 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt)) {
4524 /* Go into probertt, its been too long since we went lower */
4525 rack_enter_probertt(rack, us_cts);
4529 static void
4530 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est,
4531 uint32_t rtt, int32_t rtt_diff)
4533 uint64_t cur_bw, up_bnd, low_bnd, subfr;
4534 uint32_t losses;
4536 if ((rack->rc_gp_dyn_mul == 0) ||
4537 (rack->use_fixed_rate) ||
4538 (rack->in_probe_rtt) ||
4539 (rack->rc_always_pace == 0)) {
4540 /* No dynamic GP multiplier in play */
4541 return;
4543 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start;
4544 cur_bw = rack_get_bw(rack);
4545 /* Calculate our up and down range */
4546 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up;
4547 up_bnd /= 100;
4548 up_bnd += rack->r_ctl.last_gp_comp_bw;
4550 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down;
4551 subfr /= 100;
4552 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr;
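/*
 * For illustration (example values, not necessarily the defaults):
 * with last_gp_comp_bw = 10,000,000 bytes/sec, rack_gp_per_bw_mul_up = 2
 * and rack_gp_per_bw_mul_down = 4, up_bnd = 10,200,000 and
 * low_bnd = 9,600,000 bytes/sec.  Estimates outside this band drive the
 * increase/decrease logic below; estimates inside it defer to timely.
 */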
4553 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) {
4555 * This is the case where our RTT is above
4556 * the max target and we have been configured
4557 * to just do timely (no bonus up) in that case.
4559 * There are two configurations: if set to 1, we
4560 * just do timely when we are over our max; if it's
4561 * set above 1 then we slam the multipliers down
4562 * to 100 and then decrement per timely.
4564 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4565 __LINE__, 3);
4566 if (rack->r_ctl.rc_no_push_at_mrtt > 1)
4567 rack_validate_multipliers_at_or_below_100(rack);
4568 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
4569 } else if ((timely_says != 0) && (last_bw_est < low_bnd) && !losses) {
4571 * We are decreasing; this is a bit complicated. It
4572 * means we are losing ground. This could be
4573 * because another flow entered and we are competing
4574 * for b/w with it. This will push the RTT up which
4575 * makes timely unusable unless we want to get shoved
4576 * into a corner and just be backed off (the age
4577 * old problem with delay based CC).
4579 * On the other hand if it was a route change we
4580 * would like to stay somewhat contained and not
4581 * blow out the buffers.
4583 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4584 __LINE__, 3);
4585 rack->r_ctl.last_gp_comp_bw = cur_bw;
4586 if (rack->rc_gp_bwred == 0) {
4587 /* Go into reduction counting */
4588 rack->rc_gp_bwred = 1;
4589 rack->rc_gp_timely_dec_cnt = 0;
4591 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) {
4593 * Push another time with faster pacing
4594 * to try to gain it back (we include an override to
4595 * get a full raise factor).
4597 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) ||
4598 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) ||
4599 (timely_says == 0) ||
4600 (rack_down_raise_thresh == 0)) {
4602 * Do an override up in b/w if we were
4603 * below the threshold, or if the threshold
4604 * is zero, in which case we always do the raise.
4606 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1);
4607 } else {
4608 /* Log it stays the same */
4609 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0,
4610 __LINE__, 11);
4612 rack->rc_gp_timely_dec_cnt++;
4613 /* We are not really incrementing, so don't count it */
4614 rack->rc_gp_incr = 0;
4615 rack->rc_gp_timely_inc_cnt = 0;
4616 } else {
4618 * Let's just use the RTT
4619 * information and give up
4620 * pushing.
4622 goto use_timely;
4624 } else if ((timely_says != 2) &&
4625 !losses &&
4626 (last_bw_est > up_bnd)) {
4628 * We are increasing b/w; let's keep going, updating
4629 * our b/w and ignoring any timely input, unless
4630 * of course we are at our max raise (if there is one).
4633 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4634 __LINE__, 3);
4635 rack->r_ctl.last_gp_comp_bw = cur_bw;
4636 if (rack->rc_gp_saw_ss &&
4637 rack->r_ctl.rack_per_upper_bound_ss &&
4638 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) {
4640 * In cases where we can't go higher
4641 * we should just use timely.
4643 goto use_timely;
4645 if (rack->rc_gp_saw_ca &&
4646 rack->r_ctl.rack_per_upper_bound_ca &&
4647 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) {
4649 * In cases where we can't go higher
4650 * we should just use timely.
4652 goto use_timely;
4654 rack->rc_gp_bwred = 0;
4655 rack->rc_gp_timely_dec_cnt = 0;
4656 /* You get a set number of pushes if timely is trying to reduce */
4657 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) {
4658 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4659 } else {
4660 /* Log it stays the same */
4661 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0,
4662 __LINE__, 12);
4664 return;
4665 } else {
4667 * We are staying between the lower and upper range bounds
4668 * so use timely to decide.
4670 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4671 __LINE__, 3);
4672 use_timely:
4673 if (timely_says) {
4674 rack->rc_gp_incr = 0;
4675 rack->rc_gp_timely_inc_cnt = 0;
4676 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) &&
4677 !losses &&
4678 (last_bw_est < low_bnd)) {
4679 /* We are losing ground */
4680 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4681 rack->rc_gp_timely_dec_cnt++;
4683 /* We are not really incrementing, so don't count it */
4683 rack->rc_gp_incr = 0;
4684 rack->rc_gp_timely_inc_cnt = 0;
4685 } else
4686 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
4687 } else {
4688 rack->rc_gp_bwred = 0;
4689 rack->rc_gp_timely_dec_cnt = 0;
4690 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4695 static int32_t
4696 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt)
4698 int32_t timely_says;
4699 uint64_t log_mult, log_rtt_a_diff;
4701 log_rtt_a_diff = rtt;
4702 log_rtt_a_diff <<= 32;
4703 log_rtt_a_diff |= (uint32_t)rtt_diff;
4704 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) *
4705 rack_gp_rtt_maxmul)) {
4706 /* Reduce the b/w multiplier */
4707 timely_says = 2;
4708 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
4709 log_mult <<= 32;
4710 log_mult |= prev_rtt;
4711 rack_log_timely(rack, timely_says, log_mult,
4712 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4713 log_rtt_a_diff, __LINE__, 4);
4714 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
4715 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
4716 max(rack_gp_rtt_mindiv , 1)))) {
4717 /* Increase the b/w multiplier */
4718 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
4719 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
4720 max(rack_gp_rtt_mindiv , 1));
4721 log_mult <<= 32;
4722 log_mult |= prev_rtt;
4723 timely_says = 0;
4724 rack_log_timely(rack, timely_says, log_mult ,
4725 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4726 log_rtt_a_diff, __LINE__, 5);
4727 } else {
4729 * Use a gradient to decide; the timely gradient
4730 * is:
4731 * grad = rc_rtt_diff / min_rtt;
4733 * anything below or equal to 0 will be
4734 * an increase indication. Anything above
4735 * zero is a decrease. Note we take care
4736 * of the actual gradient calculation
4737 * in the reduction (it's not needed for
4738 * an increase).
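/*
 * For example (illustrative values): with min_rtt = 20000 usecs and
 * rc_rtt_diff = +2000 the gradient is positive, so timely_says = 1
 * (decrease); with rc_rtt_diff = -2000 (or 0) timely_says = 0
 * (increase).
 */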
4740 log_mult = prev_rtt;
4741 if (rtt_diff <= 0) {
4743 * Rttdiff is less than zero, increase the
4744 * b/w multiplier (it's 0 or negative)
4746 timely_says = 0;
4747 rack_log_timely(rack, timely_says, log_mult,
4748 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6);
4749 } else {
4750 /* Reduce the b/w multiplier */
4751 timely_says = 1;
4752 rack_log_timely(rack, timely_says, log_mult,
4753 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7);
4756 return (timely_says);
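/*
 * Summary of the return values above: 2 means the RTT is at or above
 * rack_gp_rtt_maxmul times the GP min-rtt (strong decrease), 1 means
 * the rtt_diff gradient is positive (decrease), and 0 means increase.
 */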
4759 static __inline int
4760 rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm)
4762 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) &&
4763 SEQ_LEQ(rsm->r_end, tp->gput_ack)) {
4765 * This covers the case that the
4766 * resent segment is completely inside
4767 * the gp range or up to it.
4768 * |----------------|
4769 * |-----| <or>
4770 * |----|
4771 * <or> |---|
4773 return (1);
4774 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) &&
4775 SEQ_GT(rsm->r_end, tp->gput_seq)){
4777 * This covers the case of
4778 * |--------------|
4779 * |-------->|
4781 return (1);
4782 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) &&
4783 SEQ_LT(rsm->r_start, tp->gput_ack) &&
4784 SEQ_GEQ(rsm->r_end, tp->gput_ack)) {
4787 * This covers the case of
4788 * |--------------|
4789 * |-------->|
4791 return (1);
4793 return (0);
4796 static __inline void
4797 rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm)
4800 if ((tp->t_flags & TF_GPUTINPROG) == 0)
4801 return;
4803 * We have a Goodput measurement in progress. Mark
4804 * the send if it's within the window. If it's not
4805 * in the window make sure it does not have the mark.
4807 if (rack_in_gp_window(tp, rsm))
4808 rsm->r_flags |= RACK_IN_GP_WIN;
4809 else
4810 rsm->r_flags &= ~RACK_IN_GP_WIN;
4813 static __inline void
4814 rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack)
4816 /* A GP measurement is ending, clear all marks on the send map */
4817 struct rack_sendmap *rsm = NULL;
4819 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
4820 if (rsm == NULL) {
4821 rsm = tqhash_min(rack->r_ctl.tqh);
4823 /* Nothing left? */
4824 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){
4825 rsm->r_flags &= ~RACK_IN_GP_WIN;
4826 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
4831 static __inline void
4832 rack_tend_gp_marks(struct tcpcb *tp, struct tcp_rack *rack)
4834 struct rack_sendmap *rsm = NULL;
4836 if (tp->snd_una == tp->snd_max) {
4837 /* Nothing outstanding yet, nothing to do here */
4838 return;
4840 if (SEQ_GT(tp->gput_seq, tp->snd_una)) {
4842 * We are measuring ahead of some outstanding
4843 * data. We need to walk through, up until we get
4844 * to gp_seq, marking as we go so that no rsm is incorrectly
4845 * left set with RACK_IN_GP_WIN.
4847 rsm = tqhash_min(rack->r_ctl.tqh);
4848 while (rsm != NULL) {
4849 rack_mark_in_gp_win(tp, rsm);
4850 if (SEQ_GEQ(rsm->r_end, tp->gput_seq))
4851 break;
4852 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
4855 if (rsm == NULL) {
4857 * Need to find the GP seq, if rsm is
4858 * set we stopped as we hit it.
4860 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
4861 if (rsm == NULL)
4862 return;
4863 rack_mark_in_gp_win(tp, rsm);
4866 * Now we may need to mark already-sent rsms, ahead of
4867 * gput_seq in the window, since they may have been sent
4868 * *before* we started our measurement. The rsm, if non-null,
4869 * has been marked (note if rsm would have been NULL we would have
4870 * returned in the previous block). So we go to the next, and continue
4871 * until we run out of entries or we exceed the gp_ack value.
4873 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
4874 while (rsm) {
4875 rack_mark_in_gp_win(tp, rsm);
4876 if (SEQ_GT(rsm->r_end, tp->gput_ack))
4877 break;
4878 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
4882 static void
4883 rack_log_gp_calc(struct tcp_rack *rack, uint32_t add_part, uint32_t sub_part, uint32_t srtt, uint64_t meas_bw, uint64_t utim, uint8_t meth, uint32_t line)
4885 if (tcp_bblogging_on(rack->rc_tp)) {
4886 union tcp_log_stackspecific log;
4887 struct timeval tv;
4889 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
4890 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
4891 log.u_bbr.flex1 = add_part;
4892 log.u_bbr.flex2 = sub_part;
4893 log.u_bbr.flex3 = rack_wma_divisor;
4894 log.u_bbr.flex4 = srtt;
4895 log.u_bbr.flex7 = (uint16_t)line;
4896 log.u_bbr.flex8 = meth;
4897 log.u_bbr.delRate = rack->r_ctl.gp_bw;
4898 log.u_bbr.cur_del_rate = meas_bw;
4899 log.u_bbr.rttProp = utim;
4900 TCP_LOG_EVENTP(rack->rc_tp, NULL,
4901 &rack->rc_inp->inp_socket->so_rcv,
4902 &rack->rc_inp->inp_socket->so_snd,
4903 BBR_LOG_THRESH_CALC, 0,
4904 0, &log, false, &rack->r_ctl.act_rcv_time);
4908 static void
4909 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
4910 tcp_seq th_ack, int line, uint8_t quality)
4912 uint64_t tim, bytes_ps, stim, utim;
4913 uint32_t segsiz, bytes, reqbytes, us_cts;
4914 int32_t gput, new_rtt_diff, timely_says;
4915 uint64_t resid_bw, subpart = 0, addpart = 0, srtt;
4916 int did_add = 0;
4918 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
4919 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
4920 if (TSTMP_GEQ(us_cts, tp->gput_ts))
4921 tim = us_cts - tp->gput_ts;
4922 else
4923 tim = 0;
4924 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts)
4925 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts;
4926 else
4927 stim = 0;
4929 * Use the larger of the send time or ack time. This prevents us
4930 * from being influenced by ack artifacts to come up with too
4931 * high a measurement. Note that since we are spanning over many more
4932 * bytes in most of our measurements hopefully that is less likely to
4933 * occur.
4935 if (tim > stim)
4936 utim = max(tim, 1);
4937 else
4938 utim = max(stim, 1);
4939 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz));
4940 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL);
4941 if ((tim == 0) && (stim == 0)) {
4943 * Invalid measurement time, maybe
4944 * all on one ack/one send?
4946 bytes = 0;
4947 bytes_ps = 0;
4948 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4949 0, 0, 0, 10, __LINE__, NULL, quality);
4950 goto skip_measurement;
4952 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) {
4953 /* We never made a us_rtt measurement? */
4954 bytes = 0;
4955 bytes_ps = 0;
4956 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4957 0, 0, 0, 10, __LINE__, NULL, quality);
4958 goto skip_measurement;
4961 * Calculate the maximum possible b/w this connection
4962 * could have. We base our calculation on the lowest
4963 * rtt we have seen during the measurement and the
4964 * largest rwnd the client has given us in that time. This
4965 * forms a BDP that is the maximum that we could ever
4966 * get to the client. Anything larger is not valid.
4968 * I originally had code here that rejected measurements
4969 * where the time was less than 1/2 the latest us_rtt.
4970 * But after thinking on that I realized it's wrong since,
4971 * say, you had a 150Mbps or even 1Gbps link, and you
4972 * were a long way away. For example, I am in Europe (100ms rtt)
4973 * talking to my 1Gbps link in S.C. Now measuring say 150,000
4974 * bytes my time would be 1.2ms, and yet my rtt would say
4975 * the measurement was invalid since the time was < 50ms. The
4976 * same thing is true for 150Mb (8ms of time).
4978 * A better way I realized is to look at what the maximum
4979 * the connection could possibly do. This is gated on
4980 * the lowest RTT we have seen and the highest rwnd.
4981 * We should in theory never exceed that; if we are,
4982 * then something on the path is storing up packets
4983 * and then feeding them all at once to our endpoint
4984 * messing up our measurement.
4986 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd;
4987 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC;
4988 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt;
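/*
 * For example (illustrative values): with rc_gp_high_rwnd = 1,000,000
 * bytes and rc_gp_lowrtt = 10,000 usecs, last_max_bw =
 * 1,000,000 * 1,000,000 / 10,000 = 100,000,000 bytes/sec (~800 Mbps),
 * the ceiling the computed bytes_ps is later clamped to.
 */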
4989 if (SEQ_LT(th_ack, tp->gput_seq)) {
4990 /* No measurement can be made */
4991 bytes = 0;
4992 bytes_ps = 0;
4993 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4994 0, 0, 0, 10, __LINE__, NULL, quality);
4995 goto skip_measurement;
4996 } else
4997 bytes = (th_ack - tp->gput_seq);
4998 bytes_ps = (uint64_t)bytes;
5000 * Don't measure a b/w for pacing unless we have gotten at least
5001 * an initial window's worth of data in this measurement interval.
5003 * Small numbers of bytes get badly influenced by delayed ack and
5004 * other artifacts. Note we take the initial window or our
5005 * defined minimum GP (defaulting to 10 which hopefully is the
5006 * IW).
5008 if (rack->rc_gp_filled == 0) {
5010 * The initial estimate is special. We
5011 * have blasted out an IW worth of packets
5012 * without a real valid ack ts result. We
5013 * then setup the app_limited_needs_set flag,
5014 * this should get the first ack in (probably 2
5015 * MSS worth) to be recorded as the timestamp.
5016 * We thus allow a smaller number of bytes i.e.
5017 * IW - 2MSS.
5019 reqbytes -= (2 * segsiz);
5020 /* Also let's fill previous for our first measurement to be neutral */
5021 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
5023 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) {
5024 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
5025 rack->r_ctl.rc_app_limited_cnt,
5026 0, 0, 10, __LINE__, NULL, quality);
5027 goto skip_measurement;
5030 * We now need to calculate the Timely like status so
5031 * we can update (possibly) the b/w multipliers.
5033 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt;
5034 if (rack->rc_gp_filled == 0) {
5035 /* No previous reading */
5036 rack->r_ctl.rc_rtt_diff = new_rtt_diff;
5037 } else {
5038 if (rack->measure_saw_probe_rtt == 0) {
5040 * We don't want a probertt to be counted
5041 * since it will be negative incorrectly. We
5042 * expect to be reducing the RTT when we
5043 * pace at a slower rate.
5045 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8);
5046 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8);
5049 timely_says = rack_make_timely_judgement(rack,
5050 rack->r_ctl.rc_gp_srtt,
5051 rack->r_ctl.rc_rtt_diff,
5052 rack->r_ctl.rc_prev_gp_srtt
5054 bytes_ps *= HPTS_USEC_IN_SEC;
5055 bytes_ps /= utim;
5056 if (bytes_ps > rack->r_ctl.last_max_bw) {
5058 * Something on the path is playing games,
5059 * since this b/w is not possible based
5060 * on our BDP (highest rwnd and lowest rtt
5061 * we saw in the measurement window).
5063 * Another option here would be to
5064 * instead skip the measurement.
5066 rack_log_pacing_delay_calc(rack, bytes, reqbytes,
5067 bytes_ps, rack->r_ctl.last_max_bw, 0,
5068 11, __LINE__, NULL, quality);
5069 bytes_ps = rack->r_ctl.last_max_bw;
5071 /* We store gp for b/w in bytes per second */
5072 if (rack->rc_gp_filled == 0) {
5073 /* Initial measurement */
5074 if (bytes_ps) {
5075 rack->r_ctl.gp_bw = bytes_ps;
5076 rack->rc_gp_filled = 1;
5077 rack->r_ctl.num_measurements = 1;
5078 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
5079 } else {
5080 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
5081 rack->r_ctl.rc_app_limited_cnt,
5082 0, 0, 10, __LINE__, NULL, quality);
5084 if (tcp_in_hpts(rack->rc_tp) &&
5085 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
5087 * Ok we can't trust the pacer in this case
5088 * where we transition from un-paced to paced.
5089 * Or for that matter when the burst mitigation
5090 * was making a wild guess and got it wrong.
5091 * Stop the pacer and clear up all the aggregate
5092 * delays etc.
5094 tcp_hpts_remove(rack->rc_tp);
5095 rack->r_ctl.rc_hpts_flags = 0;
5096 rack->r_ctl.rc_last_output_to = 0;
5098 did_add = 2;
5099 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) {
5100 /* Still a small number run an average */
5101 rack->r_ctl.gp_bw += bytes_ps;
5102 addpart = rack->r_ctl.num_measurements;
5103 rack->r_ctl.num_measurements++;
5104 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
5105 /* We have collected enough to move forward */
5106 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements;
5108 rack_set_pace_segments(tp, rack, __LINE__, NULL);
5109 did_add = 3;
5110 } else {
5112 * We want to take 1/wma of the goodput and add it in to 7/8ths
5113 * of the old value, weighted by the srtt. So if your measurement
5114 * period is say 2 SRTT's long you would get 1/4 as the
5115 * value, if it was like 1/2 SRTT then you would get 1/16th.
5117 * But we must be careful not to take too much i.e. if the
5118 * srtt is say 20ms and the measurement is taken over
5119 * 400ms our weight would be 400/20 i.e. 20. On the
5120 * other hand if we get a measurement over 1ms with a
5121 * 10ms rtt we only want to take a much smaller portion.
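/*
 * Worked example of the weighting described above (illustrative
 * values): with srtt = 20000 usecs and utim = 40000 usecs (2 SRTTs),
 * the non-dynamic path computes subpart = gp_bw * 40000 / (20000 * 8)
 * = gp_bw / 4 and addpart = bytes_ps / 4, so the new gp_bw is
 * 3/4 of the old value plus 1/4 of the new measurement (unless
 * subpart would exceed half of gp_bw, in which case it is capped
 * at 1/2).
 */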
5123 uint8_t meth;
5125 if (rack->r_ctl.num_measurements < 0xff) {
5126 rack->r_ctl.num_measurements++;
5128 srtt = (uint64_t)tp->t_srtt;
5129 if (srtt == 0) {
5131 * Strange why did t_srtt go back to zero?
5133 if (rack->r_ctl.rc_rack_min_rtt)
5134 srtt = rack->r_ctl.rc_rack_min_rtt;
5135 else
5136 srtt = HPTS_USEC_IN_MSEC;
5139 * XXXrrs: Note for reviewers, in playing with
5140 * dynamic pacing I discovered this GP calculation
5141 * as done originally leads to some undesired results.
5142 * Basically you can get longer measurements contributing
5143 * too much to the WMA. Thus I changed it so that if you are doing
5144 * dynamic adjustments we only do the apportioned adjustment
5145 * if we have a very small (time-wise) measurement. Longer
5146 * measurements just get their weight (defaulting to 1/8)
5147 * added to the WMA. We may want to think about changing
5148 * this to always do that for both sides i.e. dynamic
5149 * and non-dynamic... but considering lots of folks
5150 * were playing with this I did not want to change the
5151 * calculation per se without your thoughts.. Lawerence?
5152 * Peter??
5154 if (rack->rc_gp_dyn_mul == 0) {
5155 subpart = rack->r_ctl.gp_bw * utim;
5156 subpart /= (srtt * 8);
5157 if (subpart < (rack->r_ctl.gp_bw / 2)) {
5159 * The b/w update takes no more
5160 * away than 1/2 of our running total
5161 * so factor it in.
5163 addpart = bytes_ps * utim;
5164 addpart /= (srtt * 8);
5165 meth = 1;
5166 } else {
5168 * Don't allow a single measurement
5169 * to account for more than 1/2 of the
5170 * WMA. This could happen on a retransmission
5171 * where utim becomes huge compared to
5172 * srtt (multiple retransmissions when using
5173 * the sending rate which factors in all the
5174 * transmissions from the first one).
5176 subpart = rack->r_ctl.gp_bw / 2;
5177 addpart = bytes_ps / 2;
5178 meth = 2;
5180 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__);
5181 resid_bw = rack->r_ctl.gp_bw - subpart;
5182 rack->r_ctl.gp_bw = resid_bw + addpart;
5183 did_add = 1;
5184 } else {
5185 if ((utim / srtt) <= 1) {
5187 * The b/w update was over a small period
5188 * of time. The idea here is to prevent a small
5189 * measurement time period from counting
5190 * too much. So we scale it based on the
5191 * time so it attributes less than 1/rack_wma_divisor
5192 * of its measurement.
5194 subpart = rack->r_ctl.gp_bw * utim;
5195 subpart /= (srtt * rack_wma_divisor);
5196 addpart = bytes_ps * utim;
5197 addpart /= (srtt * rack_wma_divisor);
5198 meth = 3;
5199 } else {
5201 * The scaled measurement was long
5202 * enough, so let's just add in the
5203 * portion of the measurement i.e. 1/rack_wma_divisor
5205 subpart = rack->r_ctl.gp_bw / rack_wma_divisor;
5206 addpart = bytes_ps / rack_wma_divisor;
5207 meth = 4;
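/*
 * The meth codes logged here: 1 = apportioned non-dynamic update,
 * 2 = update capped at half the WMA, 3 = time-scaled dynamic update
 * for short measurements, 4 = plain 1/rack_wma_divisor dynamic update.
 */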
5209 if ((rack->measure_saw_probe_rtt == 0) ||
5210 (bytes_ps > rack->r_ctl.gp_bw)) {
5212 * For probe-rtt we only add it in
5213 * if it's larger; all others we just
5214 * add in.
5216 did_add = 1;
5217 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__);
5218 resid_bw = rack->r_ctl.gp_bw - subpart;
5219 rack->r_ctl.gp_bw = resid_bw + addpart;
5222 rack_set_pace_segments(tp, rack, __LINE__, NULL);
5225 * We only watch the growth of the GP during the initial startup
5226 * or first-slowstart that ensues. If we ever needed to watch
5227 * growth of gp outside of that period all we need to do is
5228 * remove the first clause of this if (rc_initial_ss_comp).
5230 if ((rack->rc_initial_ss_comp == 0) &&
5231 (rack->r_ctl.num_measurements >= RACK_REQ_AVG)) {
5232 uint64_t gp_est;
5234 gp_est = bytes_ps;
5235 if (tcp_bblogging_on(rack->rc_tp)) {
5236 union tcp_log_stackspecific log;
5237 struct timeval tv;
5239 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5240 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
5241 log.u_bbr.flex1 = rack->r_ctl.current_round;
5242 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise;
5243 log.u_bbr.delRate = gp_est;
5244 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest;
5245 log.u_bbr.flex8 = 41;
5246 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
5247 0, &log, false, NULL, __func__, __LINE__,&tv);
5249 if ((rack->r_ctl.num_measurements == RACK_REQ_AVG) ||
5250 (rack->r_ctl.last_gpest == 0)) {
5252 * The round where we get our measurement averaging going
5253 * is the base round, so it is always the source point
5254 * for when we had our first increment. From there on
5255 * we only record the round that had a rise.
5257 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round;
5258 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw;
5259 } else if (gp_est >= rack->r_ctl.last_gpest) {
5261 * Test to see if it's gone up enough
5262 * to set the round count up to now. Note
5263 * that on the seeding of the 4th measurement we
5265 gp_est *= 1000;
5266 gp_est /= rack->r_ctl.last_gpest;
5267 if ((uint32_t)gp_est > rack->r_ctl.gp_gain_req) {
5269 * We went up enough to record the round.
5271 if (tcp_bblogging_on(rack->rc_tp)) {
5272 union tcp_log_stackspecific log;
5273 struct timeval tv;
5275 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5276 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
5277 log.u_bbr.flex1 = rack->r_ctl.current_round;
5278 log.u_bbr.flex2 = (uint32_t)gp_est;
5279 log.u_bbr.flex3 = rack->r_ctl.gp_gain_req;
5280 log.u_bbr.delRate = gp_est;
5281 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest;
5282 log.u_bbr.flex8 = 42;
5283 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
5284 0, &log, false, NULL, __func__, __LINE__,&tv);
5286 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round;
5287 if (rack->r_ctl.use_gp_not_last == 1)
5288 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw;
5289 else
5290 rack->r_ctl.last_gpest = bytes_ps;
5294 if ((rack->gp_ready == 0) &&
5295 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
5296 /* We have enough measurements now */
5297 rack->gp_ready = 1;
5298 if (rack->dgp_on ||
5299 rack->rack_hibeta)
5300 rack_set_cc_pacing(rack);
5301 if (rack->defer_options)
5302 rack_apply_deferred_options(rack);
5304 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim,
5305 rack_get_bw(rack), 22, did_add, NULL, quality);
5306 /* We do not update any multipliers if we are in or have seen a probe-rtt */
5308 if ((rack->measure_saw_probe_rtt == 0) &&
5309 rack->rc_gp_rtt_set) {
5310 if (rack->rc_skip_timely == 0) {
5311 rack_update_multiplier(rack, timely_says, bytes_ps,
5312 rack->r_ctl.rc_gp_srtt,
5313 rack->r_ctl.rc_rtt_diff);
5316 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim,
5317 rack_get_bw(rack), 3, line, NULL, quality);
5318 rack_log_pacing_delay_calc(rack,
5319 bytes, /* flex2 */
5320 tim, /* flex1 */
5321 bytes_ps, /* bw_inuse */
5322 rack->r_ctl.gp_bw, /* delRate */
5323 rack_get_lt_bw(rack), /* rttProp */
5324 20, line, NULL, 0);
5325 /* reset the gp srtt and setup the new prev */
5326 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
5327 /* Record the lost count for the next measurement */
5328 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count;
5329 skip_measurement:
5331 * We restart our diffs based on the gpsrtt in the
5332 * measurement window.
5334 rack->rc_gp_rtt_set = 0;
5335 rack->rc_gp_saw_rec = 0;
5336 rack->rc_gp_saw_ca = 0;
5337 rack->rc_gp_saw_ss = 0;
5338 rack->rc_dragged_bottom = 0;
5339 if (quality == RACK_QUALITY_HIGH) {
5341 * Gput in the stats world is in kbps where bytes_ps is
5342 * bytes per second so we do ((x * 8)/ 1000).
5344 gput = (int32_t)((bytes_ps << 3) / (uint64_t)1000);
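/*
 * bytes_ps is bytes/sec, so e.g. 125,000 bytes/sec becomes
 * (125,000 * 8) / 1000 = 1000 kbps in the stats export.
 */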
5345 #ifdef STATS
5346 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
5347 gput);
5349 * XXXLAS: This is a temporary hack, and should be
5350 * chained off VOI_TCP_GPUT when stats(9) grows an
5351 * API to deal with chained VOIs.
5353 if (tp->t_stats_gput_prev > 0)
5354 stats_voi_update_abs_s32(tp->t_stats,
5355 VOI_TCP_GPUT_ND,
5356 ((gput - tp->t_stats_gput_prev) * 100) /
5357 tp->t_stats_gput_prev);
5358 #endif
5359 tp->t_stats_gput_prev = gput;
5361 tp->t_flags &= ~TF_GPUTINPROG;
5363 * Now, are we app limited and is there space from where we
5364 * were to where we want to go?
5366 * We don't do the other case, i.e. non-app-limited, here since
5367 * the next send will trigger us picking up the missing data.
5369 if (rack->r_ctl.rc_first_appl &&
5370 TCPS_HAVEESTABLISHED(tp->t_state) &&
5371 rack->r_ctl.rc_app_limited_cnt &&
5372 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) &&
5373 ((rack->r_ctl.rc_first_appl->r_end - th_ack) >
5374 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
5376 * Yep there is enough outstanding to make a measurement here.
5378 struct rack_sendmap *rsm;
5380 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
5381 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
5382 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
5383 rack->app_limited_needs_set = 0;
5384 tp->gput_seq = th_ack;
5385 if (rack->in_probe_rtt)
5386 rack->measure_saw_probe_rtt = 1;
5387 else if ((rack->measure_saw_probe_rtt) &&
5388 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
5389 rack->measure_saw_probe_rtt = 0;
5390 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) {
5391 /* There is a full window to gain info from */
5392 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
5393 } else {
5394 /* We can only measure up to the applimited point */
5395 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack);
5396 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
5398 * We don't have enough to make a measurement.
5400 tp->t_flags &= ~TF_GPUTINPROG;
5401 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
5402 0, 0, 0, 6, __LINE__, NULL, quality);
5403 return;
5406 if (tp->t_state >= TCPS_FIN_WAIT_1) {
5408 * We will get no more data into the SB;
5409 * this means we need to have the data available
5410 * before we start a measurement.
5412 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) {
5413 /* Nope not enough data. */
5414 return;
5417 tp->t_flags |= TF_GPUTINPROG;
5419 * Now we need to find the timestamp of the send at tp->gput_seq
5420 * for the send based measurement.
5422 rack->r_ctl.rc_gp_cumack_ts = 0;
5423 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
5424 if (rsm) {
5425 /* Ok send-based limit is set */
5426 if (SEQ_LT(rsm->r_start, tp->gput_seq)) {
5428 * Move back to include the earlier part
5429 * so our ack time lines up right (this may
5430 * make an overlapping measurement but that's
5431 * ok).
5433 tp->gput_seq = rsm->r_start;
5435 if (rsm->r_flags & RACK_ACKED) {
5436 struct rack_sendmap *nrsm;
5438 tp->gput_ts = (uint32_t)rsm->r_ack_arrival;
5439 tp->gput_seq = rsm->r_end;
5440 nrsm = tqhash_next(rack->r_ctl.tqh, rsm);
5441 if (nrsm)
5442 rsm = nrsm;
5443 else {
5444 rack->app_limited_needs_set = 1;
5446 } else
5447 rack->app_limited_needs_set = 1;
5448 /* We always go from the first send */
5449 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0];
5450 } else {
5452 * If we don't find the rsm due to some
5453 * send-limit, set the current time, which
5454 * basically disables the send-limit.
5456 struct timeval tv;
5458 microuptime(&tv);
5459 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
5461 rack_tend_gp_marks(tp, rack);
5462 rack_log_pacing_delay_calc(rack,
5463 tp->gput_seq,
5464 tp->gput_ack,
5465 (uintptr_t)rsm,
5466 tp->gput_ts,
5467 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
5469 __LINE__, rsm, quality);
5470 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
5471 } else {
5473 * To make sure proper timestamp merging occurs, we need to clear
5474 * all GP marks if we don't start a measurement.
5476 rack_clear_gp_marks(tp, rack);
5481 * CC wrapper hook functions
5483 static void
5484 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs,
5485 uint16_t type, int32_t post_recovery)
5487 uint32_t prior_cwnd, acked;
5488 struct tcp_log_buffer *lgb = NULL;
5489 uint8_t labc_to_use, quality;
5491 INP_WLOCK_ASSERT(tptoinpcb(tp));
5492 tp->t_ccv.nsegs = nsegs;
5493 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una);
5494 if ((post_recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
5495 uint32_t max;
5497 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp);
5498 if (tp->t_ccv.bytes_this_ack > max) {
5499 tp->t_ccv.bytes_this_ack = max;
5502 #ifdef STATS
5503 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
5504 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd);
5505 #endif
5506 if ((th_ack == tp->snd_max) && rack->lt_bw_up) {
5508 * We will ack all the data, time to end any
5509 * lt_bw_up we have running until something
5510 * new is sent. Note we need to use the actual
5511 * ack_rcv_time which with pacing may be different.
5513 uint64_t tmark;
5515 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq);
5516 rack->r_ctl.lt_seq = tp->snd_max;
5517 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time);
5518 if (tmark >= rack->r_ctl.lt_timemark) {
5519 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark);
5521 rack->r_ctl.lt_timemark = tmark;
5522 rack->lt_bw_up = 0;
5524 quality = RACK_QUALITY_NONE;
5525 if ((tp->t_flags & TF_GPUTINPROG) &&
5526 rack_enough_for_measurement(tp, rack, th_ack, &quality)) {
5527 /* Measure the Goodput */
5528 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality);
5530 /* Which way are we limited; if not cwnd limited, no advance in CA */
5531 if (tp->snd_cwnd <= tp->snd_wnd)
5532 tp->t_ccv.flags |= CCF_CWND_LIMITED;
5533 else
5534 tp->t_ccv.flags &= ~CCF_CWND_LIMITED;
5535 if (tp->snd_cwnd > tp->snd_ssthresh) {
5536 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack,
5537 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp));
5538 /* For the setting of a window past use the actual scwnd we are using */
5539 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) {
5540 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use;
5541 tp->t_ccv.flags |= CCF_ABC_SENTAWND;
5543 } else {
5544 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND;
5545 tp->t_bytes_acked = 0;
5547 prior_cwnd = tp->snd_cwnd;
5548 if ((post_recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec ||
5549 (rack_client_low_buf && rack->client_bufferlvl &&
5550 (rack->client_bufferlvl < rack_client_low_buf)))
5551 labc_to_use = rack->rc_labc;
5552 else
5553 labc_to_use = rack_max_abc_post_recovery;
5554 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
5555 union tcp_log_stackspecific log;
5556 struct timeval tv;
5558 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5559 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
5560 log.u_bbr.flex1 = th_ack;
5561 log.u_bbr.flex2 = tp->t_ccv.flags;
5562 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack;
5563 log.u_bbr.flex4 = tp->t_ccv.nsegs;
5564 log.u_bbr.flex5 = labc_to_use;
5565 log.u_bbr.flex6 = prior_cwnd;
5566 log.u_bbr.flex7 = V_tcp_do_newsack;
5567 log.u_bbr.flex8 = 1;
5568 lgb = tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
5569 0, &log, false, NULL, __func__, __LINE__,&tv);
5571 if (CC_ALGO(tp)->ack_received != NULL) {
5572 /* XXXLAS: Find a way to live without this */
5573 tp->t_ccv.curack = th_ack;
5574 tp->t_ccv.labc = labc_to_use;
5575 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC;
5576 CC_ALGO(tp)->ack_received(&tp->t_ccv, type);
5578 if (lgb) {
5579 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd;
5581 if (rack->r_must_retran) {
5582 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) {
5584 * We now are beyond the rxt point so let's disable
5585 * the flag.
5587 rack->r_ctl.rc_out_at_rto = 0;
5588 rack->r_must_retran = 0;
5589 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) {
5591 * Only decrement the rc_out_at_rto if the cwnd advances
5592 * at least a whole segment. Otherwise, the next time the peer
5593 * acks we won't be able to send; this generally happens
5594 * when we are in Congestion Avoidance.
5596 if (acked <= rack->r_ctl.rc_out_at_rto){
5597 rack->r_ctl.rc_out_at_rto -= acked;
5598 } else {
5599 rack->r_ctl.rc_out_at_rto = 0;
5603 #ifdef STATS
5604 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use);
5605 #endif
5606 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) {
5607 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use;
5609 if ((rack->rc_initial_ss_comp == 0) &&
5610 (tp->snd_cwnd >= tp->snd_ssthresh)) {
5612 * The cwnd has grown beyond ssthresh; we have
5613 * entered CA and completed our first slow start.
5615 rack->rc_initial_ss_comp = 1;
5619 static void
5620 tcp_rack_partialack(struct tcpcb *tp)
5622 struct tcp_rack *rack;
5624 rack = (struct tcp_rack *)tp->t_fb_ptr;
5625 INP_WLOCK_ASSERT(tptoinpcb(tp));
5627 * If we are doing PRR and have enough
5628 * room to send, <or> we are pacing and PRR
5629 * is disabled, we will want to see if we
5630 * can send data (by setting r_wanted_output to
5631 * true).
5633 if ((rack->r_ctl.rc_prr_sndcnt > 0) ||
5634 rack->rack_no_prr)
5635 rack->r_wanted_output = 1;
5638 static void
5639 rack_exit_recovery(struct tcpcb *tp, struct tcp_rack *rack, int how)
5642 * Now exit recovery.
5644 EXIT_RECOVERY(tp->t_flags);
5647 static void
5648 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack)
5650 struct tcp_rack *rack;
5651 uint32_t orig_cwnd;
5653 orig_cwnd = tp->snd_cwnd;
5654 INP_WLOCK_ASSERT(tptoinpcb(tp));
5655 rack = (struct tcp_rack *)tp->t_fb_ptr;
5656 /* only alert CC if we alerted when we entered */
5657 if (CC_ALGO(tp)->post_recovery != NULL) {
5658 tp->t_ccv.curack = th_ack;
5659 CC_ALGO(tp)->post_recovery(&tp->t_ccv);
5660 if (tp->snd_cwnd < tp->snd_ssthresh) {
5662 * Rack has burst control and pacing
5663 * so let's not set this any lower than
5664 * snd_ssthresh per RFC-6582 (option 2).
5666 tp->snd_cwnd = tp->snd_ssthresh;
5669 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
5670 union tcp_log_stackspecific log;
5671 struct timeval tv;
5673 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5674 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
5675 log.u_bbr.flex1 = th_ack;
5676 log.u_bbr.flex2 = tp->t_ccv.flags;
5677 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack;
5678 log.u_bbr.flex4 = tp->t_ccv.nsegs;
5679 log.u_bbr.flex5 = V_tcp_abc_l_var;
5680 log.u_bbr.flex6 = orig_cwnd;
5681 log.u_bbr.flex7 = V_tcp_do_newsack;
5682 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
5683 log.u_bbr.flex8 = 2;
5684 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
5685 0, &log, false, NULL, __func__, __LINE__, &tv);
5687 if ((rack->rack_no_prr == 0) &&
5688 (rack->no_prr_addback == 0) &&
5689 (rack->r_ctl.rc_prr_sndcnt > 0)) {
5691 * Suck the next prr cnt back into cwnd, but
5692 * only do that if we are not application limited.
5694 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) {
5696 * We are allowed to add back to the cwnd the amount we did
5697 * not get out if:
5698 * a) no_prr_addback is off.
5699 * b) we are not app limited
5700 * c) we are doing prr
5701 * <and>
5702 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none).
5704 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax),
5705 rack->r_ctl.rc_prr_sndcnt);
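/*
 * For example (illustrative values): with a 1448-byte maxseg,
 * rack_prr_addbackmax = 2 and rc_prr_sndcnt = 5000, we add back
 * min(2 * 1448, 5000) = 2896 bytes to the cwnd.
 */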
5707 rack->r_ctl.rc_prr_sndcnt = 0;
5708 rack_log_to_prr(rack, 1, 0, __LINE__);
5710 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__);
5711 tp->snd_recover = tp->snd_una;
5712 if (rack->r_ctl.dsack_persist) {
5713 rack->r_ctl.dsack_persist--;
5714 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
5715 rack->r_ctl.num_dsack = 0;
5717 rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
5719 if (rack->rto_from_rec == 1) {
5720 rack->rto_from_rec = 0;
5721 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh)
5722 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh;
5724 rack_exit_recovery(tp, rack, 1);
5727 static void
5728 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line)
5730 struct tcp_rack *rack;
5731 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd;
5733 INP_WLOCK_ASSERT(tptoinpcb(tp));
5734 #ifdef STATS
5735 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
5736 #endif
5737 if (IN_RECOVERY(tp->t_flags) == 0) {
5738 in_rec_at_entry = 0;
5739 ssthresh_enter = tp->snd_ssthresh;
5740 cwnd_enter = tp->snd_cwnd;
5741 } else
5742 in_rec_at_entry = 1;
5743 rack = (struct tcp_rack *)tp->t_fb_ptr;
5744 switch (type) {
5745 case CC_NDUPACK:
5746 tp->t_flags &= ~TF_WASFRECOVERY;
5747 tp->t_flags &= ~TF_WASCRECOVERY;
5748 if (!IN_FASTRECOVERY(tp->t_flags)) {
5749 /* Check if this is the end of the initial Start-up i.e. initial slow-start */
5750 if (rack->rc_initial_ss_comp == 0) {
5751 /* Yep it is the end of the initial slowstart */
5752 rack->rc_initial_ss_comp = 1;
5754 rack->r_ctl.rc_prr_delivered = 0;
5755 rack->r_ctl.rc_prr_out = 0;
5756 rack->r_fast_output = 0;
5757 if (rack->rack_no_prr == 0) {
5758 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
5759 rack_log_to_prr(rack, 2, in_rec_at_entry, line);
5761 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una;
5762 tp->snd_recover = tp->snd_max;
5763 if (tp->t_flags2 & TF2_ECN_PERMIT)
5764 tp->t_flags2 |= TF2_ECN_SND_CWR;
5766 break;
5767 case CC_ECN:
5768 if (!IN_CONGRECOVERY(tp->t_flags) ||
5770 * Allow ECN reaction on ACK to CWR, if
5771 * that data segment was also CE marked.
5773 SEQ_GEQ(ack, tp->snd_recover)) {
5774 EXIT_CONGRECOVERY(tp->t_flags);
5775 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd);
5776 rack->r_fast_output = 0;
5777 tp->snd_recover = tp->snd_max + 1;
5778 if (tp->t_flags2 & TF2_ECN_PERMIT)
5779 tp->t_flags2 |= TF2_ECN_SND_CWR;
5781 break;
5782 case CC_RTO:
5783 tp->t_dupacks = 0;
5784 tp->t_bytes_acked = 0;
5785 rack->r_fast_output = 0;
5786 if (IN_RECOVERY(tp->t_flags))
5787 rack_exit_recovery(tp, rack, 2);
5788 orig_cwnd = tp->snd_cwnd;
5789 rack_log_to_prr(rack, 16, orig_cwnd, line);
5790 if (CC_ALGO(tp)->cong_signal == NULL) {
5791 /* TSNH */
5792 tp->snd_ssthresh = max(2,
5793 min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 /
5794 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp);
5795 tp->snd_cwnd = ctf_fixed_maxseg(tp);
5797 if (tp->t_flags2 & TF2_ECN_PERMIT)
5798 tp->t_flags2 |= TF2_ECN_SND_CWR;
5799 break;
5800 case CC_RTO_ERR:
5801 KMOD_TCPSTAT_INC(tcps_sndrexmitbad);
5802 /* RTO was unnecessary, so reset everything. */
5803 tp->snd_cwnd = tp->snd_cwnd_prev;
5804 tp->snd_ssthresh = tp->snd_ssthresh_prev;
5805 tp->snd_recover = tp->snd_recover_prev;
5806 if (tp->t_flags & TF_WASFRECOVERY) {
5807 ENTER_FASTRECOVERY(tp->t_flags);
5808 tp->t_flags &= ~TF_WASFRECOVERY;
5810 if (tp->t_flags & TF_WASCRECOVERY) {
5811 ENTER_CONGRECOVERY(tp->t_flags);
5812 tp->t_flags &= ~TF_WASCRECOVERY;
5814 tp->snd_nxt = tp->snd_max;
5815 tp->t_badrxtwin = 0;
5816 break;
5818 if ((CC_ALGO(tp)->cong_signal != NULL) &&
5819 (type != CC_RTO)){
5820 tp->t_ccv.curack = ack;
5821 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type);
5823 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) {
5824 rack_log_to_prr(rack, 15, cwnd_enter, line);
5825 rack->r_ctl.dsack_byte_cnt = 0;
5826 rack->r_ctl.retran_during_recovery = 0;
5827 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter;
5828 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter;
5829 rack->r_ent_rec_ns = 1;
5833 static inline void
5834 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp)
5836 uint32_t i_cwnd;
5838 INP_WLOCK_ASSERT(tptoinpcb(tp));
5840 if (CC_ALGO(tp)->after_idle != NULL)
5841 CC_ALGO(tp)->after_idle(&tp->t_ccv);
5843 if (tp->snd_cwnd == 1)
5844 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */
5845 else
5846 i_cwnd = rc_init_window(rack);
5849 * Being idle is no different than the initial window. If the cc
5850 * clamps it down below the initial window raise it to the initial
5851 * window.
5853 if (tp->snd_cwnd < i_cwnd) {
5854 tp->snd_cwnd = i_cwnd;
5859 * Indicate whether this ack should be delayed. We can delay the ack if
5860 * the following conditions are met:
5861 * - There is no delayed ack timer in progress.
5862 * - Our last ack wasn't a 0-sized window. We never want to delay
5863 * the ack that opens up a 0-sized window.
5864 * - LRO wasn't used for this segment. We make sure by checking that the
5865 * segment size is not larger than the MSS.
5866 * - Delayed acks are enabled or this is a half-synchronized T/TCP
5867 * connection.
5869 #define DELAY_ACK(tp, tlen) \
5870 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \
5871 ((tp->t_flags & TF_DELACK) == 0) && \
5872 (tlen <= tp->t_maxseg) && \
5873 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
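/*
 * For example, a single full-sized segment (tlen <= t_maxseg)
 * arriving with delayed acks enabled, no delack timer pending and no
 * zero-window previously advertised satisfies DELAY_ACK(); an
 * LRO-aggregated chunk larger than the MSS does not.
 */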
5875 static struct rack_sendmap *
5876 rack_find_lowest_rsm(struct tcp_rack *rack)
5878 struct rack_sendmap *rsm;
5881 * Walk the time-order transmitted list looking for an rsm that is
5882 * not acked. This will be the one that was sent the longest time
5883 * ago that is still outstanding.
5885 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
5886 if (rsm->r_flags & RACK_ACKED) {
5887 continue;
5889 goto finish;
5891 finish:
5892 return (rsm);
5895 static struct rack_sendmap *
5896 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
5898 struct rack_sendmap *prsm;
5901 * Walk the sequence order list backward until we arrive at
5902 * the highest seq not acked. In theory when this is called it
5903 * should be the last segment (which it was not).
5905 prsm = rsm;
5907 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) {
5908 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
5909 continue;
5911 return (prsm);
5913 return (NULL);
5916 static uint32_t
5917 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts, int line, int log_allowed)
5919 int32_t lro;
5920 uint32_t thresh;
5923 * lro is the flag we use to determine if we have seen reordering.
5924 * If it gets set we have seen reordering. The reorder logic
5925 * works in one of two ways:
5927 * If reorder-fade is configured, then we track the last time we saw
5928 * re-ordering occur. If we reach the point where enough time has
5929 * passed we no longer consider reordering to be occurring.
5931 * Or if reorder-fade is 0, then once we see reordering we consider
5932 * the connection to always be subject to reordering and just set lro
5933 * to 1.
5935 * In the end if lro is non-zero we add the extra time for
5936 * reordering in.
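/*
 * For example (illustrative values): with srtt = 10000 usecs,
 * rc_pkt_delay = 1000 and a reorder shift of 3 while reordering is
 * active, thresh = 10000 + 1000 + (10000 >> 3) = 12250 usecs, later
 * capped at 2 * srtt and at rack_rto_max.
 */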
5938 if (srtt == 0)
5939 srtt = 1;
5940 if (rack->r_ctl.rc_reorder_ts) {
5941 if (rack->r_ctl.rc_reorder_fade) {
5942 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
5943 lro = cts - rack->r_ctl.rc_reorder_ts;
5944 if (lro == 0) {
5946 * No time has passed since the last
5947 * reorder; mark it as reordering.
5949 lro = 1;
5951 } else {
5952 /* Negative time? */
5953 lro = 0;
5955 if (lro > rack->r_ctl.rc_reorder_fade) {
5956 /* Turn off reordering seen too */
5957 rack->r_ctl.rc_reorder_ts = 0;
5958 lro = 0;
5960 } else {
5961 /* Reordering does not fade */
5962 lro = 1;
5964 } else {
5965 lro = 0;
5967 if (rack->rc_rack_tmr_std_based == 0) {
5968 thresh = srtt + rack->r_ctl.rc_pkt_delay;
5969 } else {
5970 /* Standards based pkt-delay is 1/4 srtt */
5971 thresh = srtt + (srtt >> 2);
5973 if (lro && (rack->rc_rack_tmr_std_based == 0)) {
5974 /* It must be set, if not you get 1/4 rtt */
5975 if (rack->r_ctl.rc_reorder_shift)
5976 thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
5977 else
5978 thresh += (srtt >> 2);
5980 if (rack->rc_rack_use_dsack &&
5981 lro &&
5982 (rack->r_ctl.num_dsack > 0)) {
5984 * We only increase the reordering window if we
5985 * have seen reordering <and> we have a DSACK count.
5987 thresh += rack->r_ctl.num_dsack * (srtt >> 2);
5988 if (log_allowed)
5989 rack_log_dsack_event(rack, 4, line, srtt, thresh);
5991 /* SRTT * 2 is the ceiling */
5992 if (thresh > (srtt * 2)) {
5993 thresh = srtt * 2;
5995 /* And we don't want it above the RTO max either */
5996 if (thresh > rack_rto_max) {
5997 thresh = rack_rto_max;
5999 if (log_allowed)
6000 rack_log_dsack_event(rack, 6, line, srtt, thresh);
6001 return (thresh);
6004 static uint32_t
6005 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
6006 struct rack_sendmap *rsm, uint32_t srtt)
6008 struct rack_sendmap *prsm;
6009 uint32_t thresh, len;
6010 int segsiz;
6012 if (srtt == 0)
6013 srtt = 1;
6014 if (rack->r_ctl.rc_tlp_threshold)
6015 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
6016 else
6017 thresh = (srtt * 2);
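/*
 * For example (illustrative values): with srtt = 40000 usecs and
 * rc_tlp_threshold = 2 the base thresh is 40000 + 20000 = 60000
 * usecs; with rc_tlp_threshold = 0 it is 2 * srtt = 80000 usecs.
 */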
6019 /* Get the previous sent packet, if any */
6020 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
6021 len = rsm->r_end - rsm->r_start;
6022 if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
6023 /* Exactly like the ID */
6024 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) {
6025 uint32_t alt_thresh;
6027 * Compensate for delayed-ack with the d-ack time.
6029 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
6030 if (alt_thresh > thresh)
6031 thresh = alt_thresh;
6033 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) {
6034 /* 2.1 behavior */
6035 prsm = TAILQ_PREV(rsm, rack_head, r_tnext);
6036 if (prsm && (len <= segsiz)) {
6038 * Two packets outstanding, thresh should be (2*srtt) +
6039 * possible inter-packet delay (if any).
6041 uint32_t inter_gap = 0;
6042 int idx, nidx;
6044 idx = rsm->r_rtr_cnt - 1;
6045 nidx = prsm->r_rtr_cnt - 1;
6046 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) {
6047 /* Yes it was sent later (or at the same time) */
6048 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
6050 thresh += inter_gap;
6051 } else if (len <= segsiz) {
6053 * Possibly compensate for delayed-ack.
6055 uint32_t alt_thresh;
6057 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
6058 if (alt_thresh > thresh)
6059 thresh = alt_thresh;
6061 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) {
6062 /* 2.2 behavior */
6063 if (len <= segsiz) {
6064 uint32_t alt_thresh;
6066 * Compensate for delayed-ack with the d-ack time.
6068 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
6069 if (alt_thresh > thresh)
6070 thresh = alt_thresh;
6073 /* Not above an RTO */
6074 if (thresh > tp->t_rxtcur) {
6075 thresh = tp->t_rxtcur;
6077 /* Not above a RTO max */
6078 if (thresh > rack_rto_max) {
6079 thresh = rack_rto_max;
6081 /* Apply user supplied min TLP */
6082 if (thresh < rack_tlp_min) {
6083 thresh = rack_tlp_min;
6085 return (thresh);
6088 static uint32_t
6089 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack)
6092 * We want the rack_rtt which is the
6093 * last rtt we measured. However if that
6094 * does not exist we fall back to the srtt (which
6095 * we probably will never do) and then as a last
6096 * resort we use RACK_INITIAL_RTO if no srtt is
6097 * yet set.
6099 if (rack->rc_rack_rtt)
6100 return (rack->rc_rack_rtt);
6101 else if (tp->t_srtt == 0)
6102 return (RACK_INITIAL_RTO);
6103 return (tp->t_srtt);
6106 static struct rack_sendmap *
6107 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused)
6110 * Check to see whether we need to fall into recovery. We will
6111 * need to do so if our oldest transmit is past the time by which we
6112 * should have had an ack.
6114 struct tcp_rack *rack;
6115 struct rack_sendmap *rsm;
6116 int32_t idx;
6117 uint32_t srtt, thresh;
6119 rack = (struct tcp_rack *)tp->t_fb_ptr;
6120 if (tqhash_empty(rack->r_ctl.tqh)) {
6121 return (NULL);
6123 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6124 if (rsm == NULL)
6125 return (NULL);
6128 if (rsm->r_flags & RACK_ACKED) {
6129 rsm = rack_find_lowest_rsm(rack);
6130 if (rsm == NULL)
6131 return (NULL);
6133 idx = rsm->r_rtr_cnt - 1;
6134 srtt = rack_grab_rtt(tp, rack);
6135 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1);
6136 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) {
6137 return (NULL);
6139 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) {
6140 return (NULL);
6142 /* Ok if we reach here we are overdue and this guy can be sent */
6143 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
6144 return (rsm);
6147 static uint32_t
6148 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack)
6150 int32_t t;
6151 int32_t tt;
6152 uint32_t ret_val;
6154 t = (tp->t_srtt + (tp->t_rttvar << 2));
6155 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
6156 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop);
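/*
 * In other words, t = srtt + 4 * rttvar; the persist timeout is t
 * scaled by tcp_backoff[t_rxtshift] and then clamped into
 * [rack_persist_min, rack_persist_max], with the timer slop applied
 * by RACK_TCPT_RANGESET.
 */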
6157 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
6158 ret_val = (uint32_t)tt;
6159 return (ret_val);
6162 static uint32_t
6163 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack)
6166 * Start the FR timer; we do this based on getting the first one in
6167 * the rc_tmap. Note that if it's NULL we must stop the timer. In all
6168 * events we need to stop the running timer (if it's running) before
6169 * starting the new one.
6171 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse;
6172 uint32_t srtt_cur;
6173 int32_t idx;
6174 int32_t is_tlp_timer = 0;
6175 struct rack_sendmap *rsm;
6177 if (rack->t_timers_stopped) {
6178 /* All timers have been stopped none are to run */
6179 return (0);
6181 if (rack->rc_in_persist) {
6182 /* We can't start any timer in persists */
6183 return (rack_get_persists_timer_val(tp, rack));
6185 rack->rc_on_min_to = 0;
6186 if ((tp->t_state < TCPS_ESTABLISHED) ||
6187 ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
6188 goto activate_rxt;
6190 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6191 if ((rsm == NULL) || sup_rack) {
6192 /* Nothing on the send map or no rack */
6193 activate_rxt:
6194 time_since_sent = 0;
6195 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6196 if (rsm) {
6198 * Should we discount the RTX timer any?
6200 * We want to discount it the smallest amount.
6201 * If a timer (Rack/TLP or RXT) has gone off more
6202 * recently, that's the discount we want to use (now - timer time).
6203 * If the retransmit of the oldest packet was more recent, then
6204 * we want to use that (now - oldest-packet-last_transmit_time).
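/*
 * For example (illustrative values): if t_rxtcur = 400000 usecs and
 * the most recent of (last RXT/TLP fire, oldest rsm's last transmit)
 * was 100000 usecs ago, the RXT timer is armed for 300000 usecs.
 */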
6207 idx = rsm->r_rtr_cnt - 1;
6208 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx])))
6209 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
6210 else
6211 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
6212 if (TSTMP_GT(cts, tstmp_touse))
6213 time_since_sent = cts - tstmp_touse;
6215 if (SEQ_LT(tp->snd_una, tp->snd_max) ||
6216 sbavail(&tptosocket(tp)->so_snd)) {
6217 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
6218 to = tp->t_rxtcur;
6219 if (to > time_since_sent)
6220 to -= time_since_sent;
6221 else
6222 to = rack->r_ctl.rc_min_to;
6223 if (to == 0)
6224 to = 1;
6225 /* Special case for KEEPINIT */
6226 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
6227 (TP_KEEPINIT(tp) != 0) &&
6228 rsm) {
6230 * We have to put a ceiling on the rxt timer
6231 * of the keep-init timeout.
6233 uint32_t max_time, red;
6235 max_time = TICKS_2_USEC(TP_KEEPINIT(tp));
6236 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) {
6237 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]);
6238 if (red < max_time)
6239 max_time -= red;
6240 else
6241 max_time = 1;
6243 /* Reduce timeout to the keep value if needed */
6244 if (max_time < to)
6245 to = max_time;
6247 return (to);
6249 return (0);
6251 if (rsm->r_flags & RACK_ACKED) {
6252 rsm = rack_find_lowest_rsm(rack);
6253 if (rsm == NULL) {
6254 /* No lowest? */
6255 goto activate_rxt;
6258 /* Convert from ms to usecs */
6259 if ((rsm->r_flags & RACK_SACK_PASSED) ||
6260 (rsm->r_flags & RACK_RWND_COLLAPSED) ||
6261 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
6262 if ((tp->t_flags & TF_SENTFIN) &&
6263 ((tp->snd_max - tp->snd_una) == 1) &&
6264 (rsm->r_flags & RACK_HAS_FIN)) {
6266 * We don't start a rack timer if all we have is a
6267 * FIN outstanding.
6269 goto activate_rxt;
6271 if ((rack->use_rack_rr == 0) &&
6272 (IN_FASTRECOVERY(tp->t_flags)) &&
6273 (rack->rack_no_prr == 0) &&
6274 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) {
6276 * We are not cheating, in recovery and
6277 * not enough acks yet to get our next
6278 * retransmission out.
6280 * Note that classified attackers do not
6281 * get to use the rack-cheat.
6283 goto activate_tlp;
6285 srtt = rack_grab_rtt(tp, rack);
6286 thresh = rack_calc_thresh_rack(rack, srtt, cts, __LINE__, 1);
6287 idx = rsm->r_rtr_cnt - 1;
6288 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh;
6289 if (SEQ_GEQ(exp, cts)) {
6290 to = exp - cts;
6291 if (to < rack->r_ctl.rc_min_to) {
6292 to = rack->r_ctl.rc_min_to;
6293 if (rack->r_rr_config == 3)
6294 rack->rc_on_min_to = 1;
6296 } else {
6297 to = rack->r_ctl.rc_min_to;
6298 if (rack->r_rr_config == 3)
6299 rack->rc_on_min_to = 1;
6301 } else {
6302 /* Ok we need to do a TLP not RACK */
6303 activate_tlp:
6304 if ((rack->rc_tlp_in_progress != 0) &&
6305 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) {
6307 * The previous send was a TLP and we have sent
6308 * N TLPs without sending new data.
6310 goto activate_rxt;
6312 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
6313 if (rsm == NULL) {
6314 /* We found no rsm to TLP with. */
6315 goto activate_rxt;
6317 if (rsm->r_flags & RACK_HAS_FIN) {
6318 /* If it's a FIN we don't do TLP */
6319 rsm = NULL;
6320 goto activate_rxt;
6322 idx = rsm->r_rtr_cnt - 1;
6323 time_since_sent = 0;
6324 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time))
6325 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
6326 else
6327 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
6328 if (TSTMP_GT(cts, tstmp_touse))
6329 time_since_sent = cts - tstmp_touse;
6330 is_tlp_timer = 1;
6331 if (tp->t_srtt) {
6332 if ((rack->rc_srtt_measure_made == 0) &&
6333 (tp->t_srtt == 1)) {
6335 * If another stack has run and set srtt to 1,
6336 * then the srtt was 0, so let's use the initial value.
6338 srtt = RACK_INITIAL_RTO;
6339 } else {
6340 srtt_cur = tp->t_srtt;
6341 srtt = srtt_cur;
6343 } else
6344 srtt = RACK_INITIAL_RTO;
6346 * If the SRTT is not keeping up and the
6347 * rack RTT has spiked we want to use
6348 * the last RTT not the smoothed one.
6350 if (rack_tlp_use_greater &&
6351 tp->t_srtt &&
6352 (srtt < rack_grab_rtt(tp, rack))) {
6353 srtt = rack_grab_rtt(tp, rack);
6355 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt);
6356 if (thresh > time_since_sent) {
6357 to = thresh - time_since_sent;
6358 } else {
6359 to = rack->r_ctl.rc_min_to;
6360 rack_log_alt_to_to_cancel(rack,
6361 thresh, /* flex1 */
6362 time_since_sent, /* flex2 */
6363 tstmp_touse, /* flex3 */
6364 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */
6365 (uint32_t)rsm->r_tim_lastsent[idx],
6366 srtt,
6367 idx, 99);
6369 if (to < rack_tlp_min) {
6370 to = rack_tlp_min;
6372 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) {
6374 * If the TLP time works out to be larger than the max
6375 * RTO, let's not do a TLP; just do an RTO.
6377 goto activate_rxt;
6380 if (is_tlp_timer == 0) {
6381 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
6382 } else {
6383 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
6385 if (to == 0)
6386 to = 1;
6387 return (to);
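/*
 * Enter persist state: stop any goodput measurement, note the time we
 * went idle, suspend the long-term b/w accounting, cancel any pending
 * timer and reset the RXT state before marking rc_in_persist.
 */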
6390 static void
6391 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, tcp_seq snd_una)
6393 if (rack->rc_in_persist == 0) {
6394 if (tp->t_flags & TF_GPUTINPROG) {
6396 * Stop the goodput now, the calling of the
6397 * measurement function clears the flag.
6399 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__,
6400 RACK_QUALITY_PERSIST);
6402 #ifdef NETFLIX_SHARED_CWND
6403 if (rack->r_ctl.rc_scw) {
6404 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
6405 rack->rack_scwnd_is_idle = 1;
6407 #endif
6408 rack->r_ctl.rc_went_idle_time = cts;
6409 if (rack->r_ctl.rc_went_idle_time == 0)
6410 rack->r_ctl.rc_went_idle_time = 1;
6411 if (rack->lt_bw_up) {
6412 /* Suspend our LT BW measurement */
6413 uint64_t tmark;
6415 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq);
6416 rack->r_ctl.lt_seq = snd_una;
6417 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time);
6418 if (tmark >= rack->r_ctl.lt_timemark) {
6419 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark);
6421 rack->r_ctl.lt_timemark = tmark;
6422 rack->lt_bw_up = 0;
6423 rack->r_persist_lt_bw_off = 1;
6425 rack_timer_cancel(tp, rack, cts, __LINE__);
6426 rack->r_ctl.persist_lost_ends = 0;
6427 rack->probe_not_answered = 0;
6428 rack->forced_ack = 0;
6429 tp->t_rxtshift = 0;
6430 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
6431 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
6432 rack->rc_in_persist = 1;
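/*
 * Leave persist state: pull us off the hpts pacer, optionally credit
 * the idle period as a completed probe-rtt, resume the long-term b/w
 * accounting and reset the RXT state.
 */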
6436 static void
6437 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6439 if (tcp_in_hpts(rack->rc_tp)) {
6440 tcp_hpts_remove(rack->rc_tp);
6441 rack->r_ctl.rc_hpts_flags = 0;
6443 #ifdef NETFLIX_SHARED_CWND
6444 if (rack->r_ctl.rc_scw) {
6445 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
6446 rack->rack_scwnd_is_idle = 0;
6448 #endif
6449 if (rack->rc_gp_dyn_mul &&
6450 (rack->use_fixed_rate == 0) &&
6451 (rack->rc_always_pace)) {
6453 * Do we count this as if a probe-rtt just
6454 * finished?
6456 uint32_t time_idle, idle_min;
6458 time_idle = cts - rack->r_ctl.rc_went_idle_time;
6459 idle_min = rack_min_probertt_hold;
6460 if (rack_probertt_gpsrtt_cnt_div) {
6461 uint64_t extra;
6462 extra = (uint64_t)rack->r_ctl.rc_gp_srtt *
6463 (uint64_t)rack_probertt_gpsrtt_cnt_mul;
6464 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div;
6465 idle_min += (uint32_t)extra;
6467 if (time_idle >= idle_min) {
6468 /* Yes, we count it as a probe-rtt. */
6469 uint32_t us_cts;
6471 us_cts = tcp_get_usecs(NULL);
6472 if (rack->in_probe_rtt == 0) {
6473 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
6474 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
6475 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
6476 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
6477 } else {
6478 rack_exit_probertt(rack, us_cts);
6482 if (rack->r_persist_lt_bw_off) {
6483 /* Continue where we left off */
6484 rack->r_ctl.lt_timemark = tcp_get_u64_usecs(NULL);
6485 rack->lt_bw_up = 1;
6486 rack->r_persist_lt_bw_off = 0;
6488 rack->rc_in_persist = 0;
6489 rack->r_ctl.rc_went_idle_time = 0;
6490 tp->t_rxtshift = 0;
6491 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
6492 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
6493 rack->r_ctl.rc_agg_delayed = 0;
6494 rack->r_early = 0;
6495 rack->r_late = 0;
6496 rack->r_ctl.rc_agg_early = 0;
6499 static void
6500 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts,
6501 struct hpts_diag *diag, struct timeval *tv)
6503 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
6504 union tcp_log_stackspecific log;
6506 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
6507 log.u_bbr.flex1 = diag->p_nxt_slot;
6508 log.u_bbr.flex2 = diag->p_cur_slot;
6509 log.u_bbr.flex3 = diag->slot_req;
6510 log.u_bbr.flex4 = diag->inp_hptsslot;
6511 log.u_bbr.flex5 = diag->slot_remaining;
6512 log.u_bbr.flex6 = diag->need_new_to;
6513 log.u_bbr.flex7 = diag->p_hpts_active;
6514 log.u_bbr.flex8 = diag->p_on_min_sleep;
6515 /* Hijack other fields as needed */
6516 log.u_bbr.epoch = diag->have_slept;
6517 log.u_bbr.lt_epoch = diag->yet_to_sleep;
6518 log.u_bbr.pkts_out = diag->co_ret;
6519 log.u_bbr.applimited = diag->hpts_sleep_time;
6520 log.u_bbr.delivered = diag->p_prev_slot;
6521 log.u_bbr.inflight = diag->p_runningslot;
6522 log.u_bbr.bw_inuse = diag->wheel_slot;
6523 log.u_bbr.rttProp = diag->wheel_cts;
6524 log.u_bbr.timeStamp = cts;
6525 log.u_bbr.delRate = diag->maxslots;
6526 log.u_bbr.cur_del_rate = diag->p_curtick;
6527 log.u_bbr.cur_del_rate <<= 32;
6528 log.u_bbr.cur_del_rate |= diag->p_lasttick;
6529 TCP_LOG_EVENTP(rack->rc_tp, NULL,
6530 &rack->rc_inp->inp_socket->so_rcv,
6531 &rack->rc_inp->inp_socket->so_snd,
6532 BBR_LOG_HPTSDIAG, 0,
6533 0, &log, false, tv);
6538 static void
6539 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type)
6541 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
6542 union tcp_log_stackspecific log;
6543 struct timeval tv;
6545 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
6546 log.u_bbr.flex1 = sb->sb_flags;
6547 log.u_bbr.flex2 = len;
6548 log.u_bbr.flex3 = sb->sb_state;
6549 log.u_bbr.flex8 = type;
6550 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
6551 TCP_LOG_EVENTP(rack->rc_tp, NULL,
6552 &rack->rc_inp->inp_socket->so_rcv,
6553 &rack->rc_inp->inp_socket->so_snd,
6554 TCP_LOG_SB_WAKE, 0,
6555 len, &log, false, &tv);
6559 static void
6560 rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
6561 int32_t slot, uint32_t tot_len_this_send, int sup_rack)
6563 struct hpts_diag diag;
6564 struct inpcb *inp = tptoinpcb(tp);
6565 struct timeval tv;
6566 uint32_t delayed_ack = 0;
6567 uint32_t hpts_timeout;
6568 uint32_t entry_slot = slot;
6569 uint8_t stopped;
6570 uint32_t left = 0;
6571 uint32_t us_cts;
6573 if ((tp->t_state == TCPS_CLOSED) ||
6574 (tp->t_state == TCPS_LISTEN)) {
6575 return;
6577 if (tcp_in_hpts(tp)) {
6578 /* Already on the pacer */
6579 return;
6581 stopped = rack->rc_tmr_stopped;
6582 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
6583 left = rack->r_ctl.rc_timer_exp - cts;
6585 rack->r_ctl.rc_timer_exp = 0;
6586 rack->r_ctl.rc_hpts_flags = 0;
6587 us_cts = tcp_get_usecs(&tv);
6588 /* Now early/late accounting */
6589 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0);
6590 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) {
6592 * We have an early carry-over set;
6593 * we can always add more time, so we
6594 * can always make this compensation.
6596 * Note: if acks are allowed to wake us, do not
6597 * penalize the next timer for being woken
6598 * by an ack, aka the rc_agg_early (non-paced mode).
6600 slot += rack->r_ctl.rc_agg_early;
6601 rack->r_early = 0;
6602 rack->r_ctl.rc_agg_early = 0;
6604 if ((rack->r_late) &&
6605 ((rack->r_use_hpts_min == 0) || (rack->dgp_on == 0))) {
6607 * This is harder, we can
6608 * compensate some but it
6609 * really depends on what
6610 * the current pacing time is.
6612 if (rack->r_ctl.rc_agg_delayed >= slot) {
6614 * We can't compensate for it all.
6615 * And we have to have some time
6616 * on the clock. We always have a minimum of
6617 * 10 slots (10 x 10, i.e. 100 usecs).
6619 if (slot <= HPTS_TICKS_PER_SLOT) {
6620 /* We gain delay */
6621 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot);
6622 slot = HPTS_TICKS_PER_SLOT;
6623 } else {
6624 /* We take off some */
6625 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT);
6626 slot = HPTS_TICKS_PER_SLOT;
6628 } else {
6629 slot -= rack->r_ctl.rc_agg_delayed;
6630 rack->r_ctl.rc_agg_delayed = 0;
6631 /* Make sure we have 100 useconds at minimum */
6632 if (slot < HPTS_TICKS_PER_SLOT) {
6633 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot;
6634 slot = HPTS_TICKS_PER_SLOT;
6636 if (rack->r_ctl.rc_agg_delayed == 0)
6637 rack->r_late = 0;
6639 } else if (rack->r_late) {
6640 /* r_use_hpts_min is on and so is DGP */
6641 uint32_t max_red;
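/*
 * max_reduction caps, as a percentage of this pacing slot, how much
 * of the accumulated lateness we claw back in a single pass.
 */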
6643 max_red = (slot * rack->r_ctl.max_reduction) / 100;
6644 if (max_red >= rack->r_ctl.rc_agg_delayed) {
6645 slot -= rack->r_ctl.rc_agg_delayed;
6646 rack->r_ctl.rc_agg_delayed = 0;
6647 } else {
6648 slot -= max_red;
6649 rack->r_ctl.rc_agg_delayed -= max_red;
6652 if ((rack->r_use_hpts_min == 1) &&
6653 (slot > 0) &&
6654 (rack->dgp_on == 1)) {
6656 * We are enforcing a min pacing timer
6657 * based on our hpts min timeout.
6659 uint32_t min;
6661 min = get_hpts_min_sleep_time();
6662 if (min > slot) {
6663 slot = min;
6666 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack);
6667 if (tp->t_flags & TF_DELACK) {
6668 delayed_ack = TICKS_2_USEC(tcp_delacktime);
6669 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
6671 if (delayed_ack && ((hpts_timeout == 0) ||
6672 (delayed_ack < hpts_timeout)))
6673 hpts_timeout = delayed_ack;
6674 else
6675 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
6677 * If no timers are going to run and we will fall off the hptsi
6678 * wheel, we resort to a keep-alive timer if it's configured.
6680 if ((hpts_timeout == 0) &&
6681 (slot == 0)) {
6682 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
6683 (tp->t_state <= TCPS_CLOSING)) {
6685 * Ok we have no timer (persists, rack, tlp, rxt or
6686 * del-ack), we don't have segments being paced. So
6687 * all that is left is the keepalive timer.
6689 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
6690 /* Get the established keep-alive time */
6691 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp));
6692 } else {
6694 * Get the initial setup keep-alive time,
6695 * note that this is probably not going to
6696 * happen, since rack will be running an rxt timer
6697 * if a SYN of some sort is outstanding. It is
6698 * actually handled in rack_timeout_rxt().
6700 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp));
6702 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
6703 if (rack->in_probe_rtt) {
6705 * We want to instead not wake up a long time from
6706 * now but to wake up about the time we would
6707 * exit probe-rtt and initiate a keep-alive ack.
6708 * This will get us out of probe-rtt and update
6709 * our min-rtt.
6711 hpts_timeout = rack_min_probertt_hold;
6715 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) ==
6716 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
6718 * RACK, TLP, persists and RXT timers are all restartable
6719 * based on input actions, i.e. we received a packet (ack
6720 * or sack) and that changes things (rw, snd_una, etc.).
6721 * Thus we can restart them with a new value. For
6722 * keep-alive, delayed_ack we keep track of what was left
6723 * and restart the timer with a smaller value.
6725 if (left < hpts_timeout)
6726 hpts_timeout = left;
6728 if (hpts_timeout) {
6730 * Hack alert: for now we can't time-out over 2,147,483
6731 * seconds (a bit more than 596 hours), which is probably ok
6732 * :).
6734 if (hpts_timeout > 0x7ffffffe)
6735 hpts_timeout = 0x7ffffffe;
6736 rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
6738 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0);
6739 if ((rack->gp_ready == 0) &&
6740 (rack->use_fixed_rate == 0) &&
6741 (hpts_timeout < slot) &&
6742 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) {
6744 * We have no good estimate yet for the
6745 * old clunky burst mitigation or the
6746 * real pacing. And the tlp or rxt is smaller
6747 * than the pacing calculation. Let's not
6748 * pace that long since we know the calculation
6749 * so far is not accurate.
6751 slot = hpts_timeout;
6754 * Turn off all the flags for queuing by default. The
6755 * flags have important meanings to what happens when
6756 * LRO interacts with the transport. Most likely (by default now)
6757 * mbuf_queueing and ack compression are on. So the transport
6758 * has a couple of flags that control what happens (if those
6759 * are not on then these flags won't have any effect since it
6760 * won't go through the queuing LRO path).
6762 * TF2_MBUF_QUEUE_READY - This flag says that I am busy
6763 * pacing output, so don't disturb. But
6764 * it also means LRO can wake me if there
6765 * is a SACK arrival.
6767 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction
6768 * with the above flag (QUEUE_READY) and
6769 * when present it says don't even wake me
6770 * if a SACK arrives.
6772 * The idea behind these flags is that if we are pacing we
6773 * set the MBUF_QUEUE_READY and only get woken up if
6774 * a SACK arrives (which could change things) or if
6775 * our pacing timer expires. If, however, we have a rack
6776 * timer running, then we don't even want a sack to wake
6777 * us since the rack timer has to expire before we can send.
6779 * Other cases should usually have none of the flags set
6780 * so LRO can call into us.
6782 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY);
6783 if (slot) {
6784 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
6785 rack->r_ctl.rc_last_output_to = us_cts + slot;
6787 * A pacing timer (slot) is being set, in
6788 * such a case we cannot send (we are blocked by
6789 * the timer). So let's tell LRO that it should not
6790 * wake us unless there is a SACK. Note this only
6791 * will be effective if mbuf queueing is on or
6792 * compressed acks are being processed.
6794 tp->t_flags2 |= TF2_MBUF_QUEUE_READY;
6796 * But wait if we have a Rack timer running
6797 * even a SACK should not disturb us (with
6798 * the exception of r_rr_config 3).
6800 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) ||
6801 (IN_RECOVERY(tp->t_flags))) {
6802 if (rack->r_rr_config != 3)
6803 tp->t_flags2 |= TF2_DONT_SACK_QUEUE;
6804 else if (rack->rc_pace_dnd) {
6806 * When DND is on, we only let a sack
6807 * interrupt us if we are not in recovery.
6809 * If DND is off, then we never hit here
6810 * and let all sacks wake us up.
6813 tp->t_flags2 |= TF2_DONT_SACK_QUEUE;
6816 if (rack->rc_ack_can_sendout_data) {
6818 * Ahh but wait, this is that special case
6819 * where the pacing timer can be disturbed;
6820 * back out the changes (used for non-paced
6821 * burst limiting).
6823 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE |
6824 TF2_MBUF_QUEUE_READY);
6826 if ((rack->use_rack_rr) &&
6827 (rack->r_rr_config < 2) &&
6828 ((hpts_timeout) && (hpts_timeout < slot))) {
6830 * Arrange for the hpts to kick back in after the
6831 * t-o if the t-o does not cause a send.
6833 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout),
6834 __LINE__, &diag);
6835 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
6836 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
6837 } else {
6838 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot),
6839 __LINE__, &diag);
6840 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
6841 rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
6843 } else if (hpts_timeout) {
6845 * With respect to t_flags2 here, let's let any new acks wake
6846 * us up. Since we are not pacing (no pacing timer), output
6847 * can happen so we should let it. If it's a Rack timer, then any inbound
6848 * packet probably won't change the sending (we will be blocked)
6849 * but it may change the prr stats, so letting it in (the defaults set
6850 * at the start of this block) is good enough.
6852 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
6853 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout),
6854 __LINE__, &diag);
6855 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
6856 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
6857 } else {
6858 /* No timer starting */
6859 #ifdef INVARIANTS
6860 if (SEQ_GT(tp->snd_max, tp->snd_una)) {
6861 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
6862 tp, rack, tot_len_this_send, cts, slot, hpts_timeout);
6864 #endif
6866 rack->rc_tmr_stopped = 0;
6867 if (slot)
6868 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__);
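/*
 * Walk the transmit-order map starting at rsm and set RACK_WAS_LOST on
 * every entry already flagged SACK_PASSED once the reorder threshold
 * (measured from rsm's last transmit time) has expired, accumulating
 * the bytes into rc_considered_lost.
 */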
6871 static void
6872 rack_mark_lost(struct tcpcb *tp,
6873 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts)
6875 struct rack_sendmap *nrsm;
6876 uint32_t thresh, exp;
6878 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0);
6879 nrsm = rsm;
6880 TAILQ_FOREACH_FROM(nrsm, &rack->r_ctl.rc_tmap, r_tnext) {
6881 if ((nrsm->r_flags & RACK_SACK_PASSED) == 0) {
6882 /* Got up to all that were marked sack-passed */
6883 break;
6885 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) {
6886 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh;
6887 if (TSTMP_LT(exp, cts) || (exp == cts)) {
6888 /* We now consider it lost */
6889 nrsm->r_flags |= RACK_WAS_LOST;
6890 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start;
6891 } else {
6892 /* Past here it won't be lost so stop */
6893 break;
6900 * RACK timer; here we simply do logging and housekeeping.
6901 * The normal rack_output() function will call the
6902 * appropriate routine to check if we need to do a RACK retransmit.
6903 * We return 1, saying don't proceed with rack_output only
6904 * when all timers have been stopped (destroyed PCB?).
6906 static int
6907 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6910 * This timer simply provides an internal trigger to send out data.
6911 * The check_recovery_mode call will see if there are needed
6912 * retransmissions, if so we will enter fast-recovery. The output
6913 * call may or may not do the same thing depending on sysctl
6914 * settings.
6916 struct rack_sendmap *rsm;
6918 counter_u64_add(rack_to_tot, 1);
6919 if (rack->r_state && (rack->r_state != tp->t_state))
6920 rack_set_state(tp, rack);
6921 rack->rc_on_min_to = 0;
6922 rsm = rack_check_recovery_mode(tp, cts);
6923 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm);
6924 if (rsm) {
6925 /* We need to mark as lost any segments that are now declared lost */
6926 rack_mark_lost(tp, rack, rsm, cts);
6927 rack->r_ctl.rc_resend = rsm;
6928 rack->r_timer_override = 1;
6929 if (rack->use_rack_rr) {
6931 * Don't accumulate extra pacing delay;
6932 * we are allowing the rack timer to
6933 * over-ride pacing, i.e. rrr takes precedence
6934 * if the pacing interval is longer than the rrr
6935 * time (in other words we get the min of the pacing
6936 * time versus the rrr pacing time).
6938 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
6941 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
6942 if (rsm == NULL) {
6943 /* restart a timer and return 1 */
6944 rack_start_hpts_timer(rack, tp, cts,
6945 0, 0, 0);
6946 return (1);
6948 return (0);
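/*
 * An rsm caches the mbuf it maps (rsm->m), its offset (soff) and the
 * mbuf length/trailing space seen at send time. If the mbuf has since
 * grown at the tail, or been trimmed at the head by an ack, re-derive
 * orig_m_len, orig_t_space and soff to match the current mbuf.
 */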
6953 static void
6954 rack_adjust_orig_mlen(struct rack_sendmap *rsm)
6957 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) {
6959 * The trailing space changed, mbufs can grow
6960 * at the tail but they can't shrink from
6961 * it, KASSERT that. Adjust the orig_m_len to
6962 * compensate for this change.
6964 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)),
6965 ("mbuf:%p rsm:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n",
6966 rsm->m,
6967 rsm,
6968 (intmax_t)M_TRAILINGROOM(rsm->m),
6969 rsm->orig_t_space,
6970 rsm->orig_m_len,
6971 rsm->m->m_len));
6972 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m));
6973 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
6975 if (rsm->m->m_len < rsm->orig_m_len) {
6977 * Mbuf shrank, trimmed off the top by an ack, our
6978 * offset changes.
6980 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)),
6981 ("mbuf:%p len:%u rsm:%p oml:%u soff:%u\n",
6982 rsm->m, rsm->m->m_len,
6983 rsm, rsm->orig_m_len,
6984 rsm->soff));
6985 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len))
6986 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len);
6987 else
6988 rsm->soff = 0;
6989 rsm->orig_m_len = rsm->m->m_len;
6990 #ifdef INVARIANTS
6991 } else if (rsm->m->m_len > rsm->orig_m_len) {
6992 panic("rsm:%p m:%p m_len grew outside of t_space compensation",
6993 rsm, rsm->m);
6994 #endif
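/*
 * Position the (right-hand) rsm in the socket buffer's mbuf chain by
 * walking forward from src_rsm's cached mbuf/offset by the length of
 * src_rsm; falls back to sbsndmbuf() if the chain runs out.
 */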
6998 static void
6999 rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, struct rack_sendmap *rsm)
7001 struct mbuf *m;
7002 uint32_t soff;
7004 if (src_rsm->m &&
7005 ((src_rsm->orig_m_len != src_rsm->m->m_len) ||
7006 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) {
7007 /* Fix up the orig_m_len and possibly the mbuf offset */
7008 rack_adjust_orig_mlen(src_rsm);
7010 m = src_rsm->m;
7011 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start);
7012 while (soff >= m->m_len) {
7013 /* Move out past this mbuf */
7014 soff -= m->m_len;
7015 m = m->m_next;
7016 KASSERT((m != NULL),
7017 ("rsm:%p nrsm:%p hit at soff:%u null m",
7018 src_rsm, rsm, soff));
7019 if (m == NULL) {
7020 /* This should *not* happen, which is why there is a kassert */
7021 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
7022 (src_rsm->r_start - rack->rc_tp->snd_una),
7023 &src_rsm->soff);
7024 src_rsm->orig_m_len = src_rsm->m->m_len;
7025 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m);
7026 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
7027 (rsm->r_start - rack->rc_tp->snd_una),
7028 &rsm->soff);
7029 rsm->orig_m_len = rsm->m->m_len;
7030 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
7031 return;
7034 rsm->m = m;
7035 rsm->soff = soff;
7036 rsm->orig_m_len = m->m_len;
7037 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
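/*
 * Split rsm at 'start': nrsm takes over [start, r_end) along with the
 * retransmit history, the SYN flag stays on the left piece while
 * FIN/PUSH move to the right piece, and the mbuf offset for nrsm is
 * recomputed.
 */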
7040 static __inline void
7041 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm,
7042 struct rack_sendmap *rsm, uint32_t start)
7044 int idx;
7046 nrsm->r_start = start;
7047 nrsm->r_end = rsm->r_end;
7048 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
7049 nrsm->r_act_rxt_cnt = rsm->r_act_rxt_cnt;
7050 nrsm->r_flags = rsm->r_flags;
7051 nrsm->r_dupack = rsm->r_dupack;
7052 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed;
7053 nrsm->r_rtr_bytes = 0;
7054 nrsm->r_fas = rsm->r_fas;
7055 nrsm->r_bas = rsm->r_bas;
7056 tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start);
7057 nrsm->r_just_ret = rsm->r_just_ret;
7058 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
7059 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
7061 /* Now if we have SYN flag we keep it on the left edge */
7062 if (nrsm->r_flags & RACK_HAS_SYN)
7063 nrsm->r_flags &= ~RACK_HAS_SYN;
7064 /* Now if we have a FIN flag we keep it on the right edge */
7065 if (rsm->r_flags & RACK_HAS_FIN)
7066 rsm->r_flags &= ~RACK_HAS_FIN;
7067 /* Push bit must go to the right edge as well */
7068 if (rsm->r_flags & RACK_HAD_PUSH)
7069 rsm->r_flags &= ~RACK_HAD_PUSH;
7070 /* Clone over the state of the hw_tls flag */
7071 nrsm->r_hw_tls = rsm->r_hw_tls;
7073 * Now we need to find nrsm's new location in the mbuf chain.
7074 * We basically calculate a new offset, which is soff +
7075 * how much is left in the original rsm. Then we walk out the mbuf
7076 * chain to find the right position; it may be the same mbuf
7077 * or maybe not.
7079 KASSERT(((rsm->m != NULL) ||
7080 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))),
7081 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack));
7082 if (rsm->m)
7083 rack_setup_offset_for_rsm(rack, rsm, nrsm);
7086 static struct rack_sendmap *
7087 rack_merge_rsm(struct tcp_rack *rack,
7088 struct rack_sendmap *l_rsm,
7089 struct rack_sendmap *r_rsm)
7092 * We are merging two ack'd RSM's,
7093 * the l_rsm is on the left (lower seq
7094 * values) and the r_rsm is on the right
7095 * (higher seq value). The simplest way
7096 * to merge these is to move the right
7097 * one into the left. I don't think there
7098 * is any reason we need to try to find
7099 * the oldest (or last oldest retransmitted).
7101 rack_log_map_chg(rack->rc_tp, rack, NULL,
7102 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__);
7103 tqhash_update_end(rack->r_ctl.tqh, l_rsm, r_rsm->r_end);
7104 if (l_rsm->r_dupack < r_rsm->r_dupack)
7105 l_rsm->r_dupack = r_rsm->r_dupack;
7106 if (r_rsm->r_rtr_bytes)
7107 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes;
7108 if (r_rsm->r_in_tmap) {
7109 /* This really should not happen */
7110 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext);
7111 r_rsm->r_in_tmap = 0;
7114 /* Now the flags */
7115 if (r_rsm->r_flags & RACK_HAS_FIN)
7116 l_rsm->r_flags |= RACK_HAS_FIN;
7117 if (r_rsm->r_flags & RACK_TLP)
7118 l_rsm->r_flags |= RACK_TLP;
7119 if (r_rsm->r_flags & RACK_RWND_COLLAPSED)
7120 l_rsm->r_flags |= RACK_RWND_COLLAPSED;
7121 if ((r_rsm->r_flags & RACK_APP_LIMITED) &&
7122 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) {
7124 * If both are app-limited then let the
7125 * free lower the count. If right is app
7126 * limited and left is not, transfer.
7128 l_rsm->r_flags |= RACK_APP_LIMITED;
7129 r_rsm->r_flags &= ~RACK_APP_LIMITED;
7130 if (r_rsm == rack->r_ctl.rc_first_appl)
7131 rack->r_ctl.rc_first_appl = l_rsm;
7133 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE);
7135 * We keep the largest value, which is the newest
7136 * send. We do this in case a segment that is
7137 * joined together and not part of a GP estimate
7138 * later gets expanded into the GP estimate.
7140 * We prohibit the merging of unlike kinds i.e.
7141 * all pieces that are in the GP estimate can be
7142 * merged and all pieces that are not in a GP estimate
7143 * can be merged, but not dissimilar pieces. Combine
7144 * this with taking the highest here and we should
7145 * be ok unless of course the client reneges. Then
7146 * all bets are off.
7148 if(l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] <
7149 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) {
7150 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)];
7153 * When merging two RSM's we also need to consider the ack time and keep the
7154 * newest. If the ack gets merged into a measurement then that is the
7155 * one we will want to be using.
7157 if(l_rsm->r_ack_arrival < r_rsm->r_ack_arrival)
7158 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival;
7160 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) {
7161 /* Transfer the split limit to the map we free */
7162 r_rsm->r_limit_type = l_rsm->r_limit_type;
7163 l_rsm->r_limit_type = 0;
7165 rack_free(rack, r_rsm);
7166 l_rsm->r_flags |= RACK_MERGED;
7167 return (l_rsm);
7171 * TLP timer; here we simply set up which segment we want to
7172 * have the TLP expire on; the normal rack_output() will then
7173 * send it out.
7175 * We return 1, saying don't proceed with rack_output only
7176 * when all timers have been stopped (destroyed PCB?).
7178 static int
7179 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp)
7182 * Tail Loss Probe.
7184 struct rack_sendmap *rsm = NULL;
7185 int insret __diagused;
7186 struct socket *so = tptosocket(tp);
7187 uint32_t amm;
7188 uint32_t out, avail;
7189 int collapsed_win = 0;
7191 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
7192 /* Its not time yet */
7193 return (0);
7195 if (ctf_progress_timeout_check(tp, true)) {
7196 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
7197 return (-ETIMEDOUT); /* tcp_drop() */
7200 * A TLP timer has expired. We have been idle for 2 rtts. So we now
7201 * need to figure out how to force a full MSS segment out.
7203 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL);
7204 rack->r_ctl.retran_during_recovery = 0;
7205 rack->r_might_revert = 0;
7206 rack->r_ctl.dsack_byte_cnt = 0;
7207 counter_u64_add(rack_tlp_tot, 1);
7208 if (rack->r_state && (rack->r_state != tp->t_state))
7209 rack_set_state(tp, rack);
7210 avail = sbavail(&so->so_snd);
7211 out = tp->snd_max - tp->snd_una;
7212 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) {
7213 /* special case, we need a retransmission */
7214 collapsed_win = 1;
7215 goto need_retran;
7217 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) {
7218 rack->r_ctl.dsack_persist--;
7219 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
7220 rack->r_ctl.num_dsack = 0;
7222 rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
7224 if ((tp->t_flags & TF_GPUTINPROG) &&
7225 (rack->r_ctl.rc_tlp_cnt_out == 1)) {
7227 * If this is the second in a row
7228 * TLP and we are doing a measurement
7229 * it's time to abandon the measurement.
7230 * Something is likely broken on
7231 * the client's network and measuring a
7232 * broken network does us no good.
7234 tp->t_flags &= ~TF_GPUTINPROG;
7235 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
7236 rack->r_ctl.rc_gp_srtt /*flex1*/,
7237 tp->gput_seq,
7238 0, 0, 18, __LINE__, NULL, 0);
7241 * Check our always-send-oldest setting, and if
7242 * there is an oldest to send, jump to need_retran.
7244 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0))
7245 goto need_retran;
7247 if (avail > out) {
7248 /* New data is available */
7249 amm = avail - out;
7250 if (amm > ctf_fixed_maxseg(tp)) {
7251 amm = ctf_fixed_maxseg(tp);
7252 if ((amm + out) > tp->snd_wnd) {
7253 /* We are rwnd limited */
7254 goto need_retran;
7256 } else if (amm < ctf_fixed_maxseg(tp)) {
7257 /* not enough to fill an MTU */
7258 goto need_retran;
7260 if (IN_FASTRECOVERY(tp->t_flags)) {
7261 /* Unlikely */
7262 if (rack->rack_no_prr == 0) {
7263 if (out + amm <= tp->snd_wnd) {
7264 rack->r_ctl.rc_prr_sndcnt = amm;
7265 rack->r_ctl.rc_tlp_new_data = amm;
7266 rack_log_to_prr(rack, 4, 0, __LINE__);
7268 } else
7269 goto need_retran;
7270 } else {
7271 /* Set the send-new override */
7272 if (out + amm <= tp->snd_wnd)
7273 rack->r_ctl.rc_tlp_new_data = amm;
7274 else
7275 goto need_retran;
7277 rack->r_ctl.rc_tlpsend = NULL;
7278 counter_u64_add(rack_tlp_newdata, 1);
7279 goto send;
7281 need_retran:
7283 * Ok we need to arrange for the last un-acked segment to be re-sent, or
7284 * optionally the first un-acked segment.
7286 if (collapsed_win == 0) {
7287 if (rack_always_send_oldest)
7288 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
7289 else {
7290 rsm = tqhash_max(rack->r_ctl.tqh);
7291 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
7292 rsm = rack_find_high_nonack(rack, rsm);
7295 if (rsm == NULL) {
7296 #ifdef TCP_BLACKBOX
7297 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
7298 #endif
7299 goto out;
7301 } else {
7303 * We had a collapsed window; let's find
7304 * the point before the collapse.
7306 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una))
7307 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1));
7308 else {
7309 rsm = tqhash_min(rack->r_ctl.tqh);
7311 if (rsm == NULL) {
7312 /* Huh */
7313 goto out;
7316 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) {
7318 * We need to split this, the last segment, in two.
7320 struct rack_sendmap *nrsm;
7322 nrsm = rack_alloc_full_limit(rack);
7323 if (nrsm == NULL) {
7325 * No memory to split, we will just exit and punt
7326 * off to the RXT timer.
7328 goto out;
7330 rack_clone_rsm(rack, nrsm, rsm,
7331 (rsm->r_end - ctf_fixed_maxseg(tp)));
7332 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
7333 #ifndef INVARIANTS
7334 (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
7335 #else
7336 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
7337 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
7338 nrsm, insret, rack, rsm);
7340 #endif
7341 if (rsm->r_in_tmap) {
7342 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7343 nrsm->r_in_tmap = 1;
7345 rsm = nrsm;
7347 rack->r_ctl.rc_tlpsend = rsm;
7348 send:
7349 /* Make sure output path knows we are doing a TLP */
7350 *doing_tlp = 1;
7351 rack->r_timer_override = 1;
7352 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
7353 return (0);
7354 out:
7355 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
7356 return (0);
7360 * Delayed ack timer; here we simply need to set the
7361 * ACK_NOW flag and remove the DELACK flag. From there
7362 * the output routine will send the ack out.
7364 * We only return 1, saying don't proceed, if all timers
7365 * are stopped (destroyed PCB?).
7367 static int
7368 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
7371 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL);
7372 tp->t_flags &= ~TF_DELACK;
7373 tp->t_flags |= TF_ACKNOW;
7374 KMOD_TCPSTAT_INC(tcps_delack);
7375 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
7376 return (0);
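/*
 * Send a forced ack (window probe / keepalive style) using snd_una - 1
 * via tcp_respond(). The first probe stamps forced_ack_ts; if a prior
 * forced ack is still unanswered, probe_not_answered is set instead.
 * Returns 1 if the segment was sent, 0 if no template could be built.
 */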
7379 static inline int
7380 rack_send_ack_challange(struct tcp_rack *rack)
7382 struct tcptemp *t_template;
7384 t_template = tcpip_maketemplate(rack->rc_inp);
7385 if (t_template) {
7386 if (rack->forced_ack == 0) {
7387 rack->forced_ack = 1;
7388 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
7389 } else {
7390 rack->probe_not_answered = 1;
7392 tcp_respond(rack->rc_tp, t_template->tt_ipgen,
7393 &t_template->tt_t, (struct mbuf *)NULL,
7394 rack->rc_tp->rcv_nxt, rack->rc_tp->snd_una - 1, 0);
7395 free(t_template, M_TEMP);
7396 /* This does send an ack so kill any D-ack timer */
7397 if (rack->rc_tp->t_flags & TF_DELACK)
7398 rack->rc_tp->t_flags &= ~TF_DELACK;
7399 return(1);
7400 } else
7401 return (0);
7406 * Persists timer; here we simply send the
7407 * same thing as a keepalive would:
7408 * the one byte send.
7410 * We only return 1, saying don't proceed, if all timers
7411 * are stopped (destroyed PCB?).
7413 static int
7414 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
7416 int32_t retval = 1;
7418 if (rack->rc_in_persist == 0)
7419 return (0);
7420 if (ctf_progress_timeout_check(tp, false)) {
7421 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
7422 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
7423 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
7424 return (-ETIMEDOUT); /* tcp_drop() */
7427 * Persistence timer into zero window. Force a byte to be output, if
7428 * possible.
7430 KMOD_TCPSTAT_INC(tcps_persisttimeo);
7432 * Hack: if the peer is dead/unreachable, we do not time out if the
7433 * window is closed. After a full backoff, drop the connection if
7434 * the idle time (no responses to probes) reaches the maximum
7435 * backoff that we would use if retransmitting.
7437 if (tp->t_rxtshift >= V_tcp_retries &&
7438 (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
7439 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) {
7440 KMOD_TCPSTAT_INC(tcps_persistdrop);
7441 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
7442 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
7443 retval = -ETIMEDOUT; /* tcp_drop() */
7444 goto out;
7446 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) &&
7447 tp->snd_una == tp->snd_max)
7448 rack_exit_persist(tp, rack, cts);
7449 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
7451 * If the user has closed the socket then drop a persisting
7452 * connection after a much reduced timeout.
7454 if (tp->t_state > TCPS_CLOSE_WAIT &&
7455 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
7456 KMOD_TCPSTAT_INC(tcps_persistdrop);
7457 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
7458 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
7459 retval = -ETIMEDOUT; /* tcp_drop() */
7460 goto out;
7462 if (rack_send_ack_challange(rack)) {
7463 /* only set it if we were answered */
7464 if (rack->probe_not_answered) {
7465 counter_u64_add(rack_persists_loss, 1);
7466 rack->r_ctl.persist_lost_ends++;
7468 counter_u64_add(rack_persists_sends, 1);
7469 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1);
7471 if (tp->t_rxtshift < V_tcp_retries)
7472 tp->t_rxtshift++;
7473 out:
7474 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL);
7475 rack_start_hpts_timer(rack, tp, cts,
7476 0, 0, 0);
7477 return (retval);
7481 * If a keepalive goes off, we had no other timers
7482 * happening. We always return 1 here since this
7483 * routine either drops the connection or sends
7484 * out a segment that forces a response.
7486 static int
7487 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
7489 struct inpcb *inp = tptoinpcb(tp);
7491 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
7492 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL);
7494 * Keep-alive timer went off; send something or drop connection if
7495 * idle for too long.
7497 KMOD_TCPSTAT_INC(tcps_keeptimeo);
7498 if (tp->t_state < TCPS_ESTABLISHED)
7499 goto dropit;
7500 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
7501 tp->t_state <= TCPS_CLOSING) {
7502 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
7503 goto dropit;
7505 * Send a packet designed to force a response if the peer is
7506 * up and reachable: either an ACK if the connection is
7507 * still alive, or an RST if the peer has closed the
7508 * connection due to timeout or reboot. Using sequence
7509 * number tp->snd_una-1 causes the transmitted zero-length
7510 * segment to lie outside the receive window; by the
7511 * protocol spec, this requires the correspondent TCP to
7512 * respond.
7514 KMOD_TCPSTAT_INC(tcps_keepprobe);
7515 rack_send_ack_challange(rack);
7517 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
7518 return (1);
7519 dropit:
7520 KMOD_TCPSTAT_INC(tcps_keepdrops);
7521 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
7522 return (-ETIMEDOUT); /* tcp_drop() */
7526 * Retransmit helper function; clear all the ack
7527 * flags and take care of important bookkeeping.
7529 static void
7530 rack_remxt_tmr(struct tcpcb *tp)
7533 * The retransmit timer went off; all sack'd blocks must be
7534 * un-acked.
7536 struct rack_sendmap *rsm, *trsm = NULL;
7537 struct tcp_rack *rack;
7539 rack = (struct tcp_rack *)tp->t_fb_ptr;
7540 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__);
7541 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL);
7542 rack->r_timer_override = 1;
7543 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
7544 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una;
7545 rack->r_late = 0;
7546 rack->r_early = 0;
7547 rack->r_ctl.rc_agg_delayed = 0;
7548 rack->r_ctl.rc_agg_early = 0;
7549 if (rack->r_state && (rack->r_state != tp->t_state))
7550 rack_set_state(tp, rack);
7551 if (tp->t_rxtshift <= rack_rxt_scoreboard_clear_thresh) {
7553 * We do not clear the scoreboard until we have had
7554 * more than rack_rxt_scoreboard_clear_thresh time-outs.
7556 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
7557 if (rack->r_ctl.rc_resend != NULL)
7558 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT;
7560 return;
7563 * Ideally we would like to be able to
7564 * mark SACK-PASS on anything not acked here.
7566 * However, if we do that we would burst out
7567 * all that data 1ms apart. This would be unwise,
7568 * so for now we will just let the normal rxt timer
7569 * and tlp timer take care of it.
7571 * Also we really need to stick them back in sequence
7572 * order. This way we send in the proper order and any
7573 * sacks that come floating in will "re-ack" the data.
7574 * To do this we zap the tmap with an INIT and then
7575 * walk through and place every rsm in the tail queue
7576 * hash table back in its seq ordered place.
7578 TAILQ_INIT(&rack->r_ctl.rc_tmap);
7580 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) {
7581 rsm->r_dupack = 0;
7582 if (rack_verbose_logging)
7583 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7584 /* We must re-add it back to the tlist */
7585 if (trsm == NULL) {
7586 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7587 } else {
7588 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
7590 rsm->r_in_tmap = 1;
7591 trsm = rsm;
7592 if (rsm->r_flags & RACK_ACKED)
7593 rsm->r_flags |= RACK_WAS_ACKED;
7594 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED | RACK_WAS_LOST);
7595 rsm->r_flags |= RACK_MUST_RXT;
7597 /* zero the lost since it's all gone */
7598 rack->r_ctl.rc_considered_lost = 0;
7599 /* Clear the count (we just un-acked them) */
7600 rack->r_ctl.rc_sacked = 0;
7601 rack->r_ctl.rc_sacklast = NULL;
7602 /* Clear the tlp rtx mark */
7603 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh);
7604 if (rack->r_ctl.rc_resend != NULL)
7605 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT;
7606 rack->r_ctl.rc_prr_sndcnt = 0;
7607 rack_log_to_prr(rack, 6, 0, __LINE__);
7608 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh);
7609 if (rack->r_ctl.rc_resend != NULL)
7610 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT;
7611 if (((tp->t_flags & TF_SACK_PERMIT) == 0) &&
7612 ((tp->t_flags & TF_SENTFIN) == 0)) {
7614 * For non-sack customers new data
7615 * needs to go out as retransmits until
7616 * we retransmit up to snd_max.
7618 rack->r_must_retran = 1;
7619 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp,
7620 rack->r_ctl.rc_sacked);
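/*
 * Switch the tcpcb timer fields to microsecond granularity and
 * recompute t_rxtcur, adding the retransmit slop once established and
 * capping the result at rack_rto_max.
 */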
7624 static void
7625 rack_convert_rtts(struct tcpcb *tp)
7627 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC);
7628 tp->t_rxtcur = RACK_REXMTVAL(tp);
7629 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
7630 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop);
7632 if (tp->t_rxtcur > rack_rto_max) {
7633 tp->t_rxtcur = rack_rto_max;
7637 static void
7638 rack_cc_conn_init(struct tcpcb *tp)
7640 struct tcp_rack *rack;
7641 uint32_t srtt;
7643 rack = (struct tcp_rack *)tp->t_fb_ptr;
7644 srtt = tp->t_srtt;
7645 cc_conn_init(tp);
7647 * Now convert to rack's internal format,
7648 * if required.
7650 if ((srtt == 0) && (tp->t_srtt != 0))
7651 rack_convert_rtts(tp);
7653 * We want a chance to stay in slowstart as
7654 * we create a connection. TCP spec says that
7655 * initially ssthresh is infinite. For our
7656 * purposes that is the snd_wnd.
7658 if (tp->snd_ssthresh < tp->snd_wnd) {
7659 tp->snd_ssthresh = tp->snd_wnd;
7662 * We also want to assure an IW worth of
7663 * data can get in flight.
7665 if (rc_init_window(rack) < tp->snd_cwnd)
7666 tp->snd_cwnd = rc_init_window(rack);
7670 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
7671 * we will set up to retransmit the lowest seq number outstanding.
7673 static int
7674 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
7676 struct inpcb *inp = tptoinpcb(tp);
7677 int32_t rexmt;
7678 int32_t retval = 0;
7679 bool isipv6;
7681 if ((tp->t_flags & TF_GPUTINPROG) &&
7682 (tp->t_rxtshift)) {
7684 * We have had a second timeout;
7685 * measurements on successive rxt's are not profitable.
7686 * It is unlikely to be of any use (the network is
7687 * broken or the client went away).
7689 tp->t_flags &= ~TF_GPUTINPROG;
7690 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
7691 rack->r_ctl.rc_gp_srtt /*flex1*/,
7692 tp->gput_seq,
7693 0, 0, 18, __LINE__, NULL, 0);
7695 if (ctf_progress_timeout_check(tp, false)) {
7696 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
7697 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
7698 return (-ETIMEDOUT); /* tcp_drop() */
7700 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
7701 rack->r_ctl.retran_during_recovery = 0;
7702 rack->rc_ack_required = 1;
7703 rack->r_ctl.dsack_byte_cnt = 0;
7704 if (IN_RECOVERY(tp->t_flags) &&
7705 (rack->rto_from_rec == 0)) {
7707 * Mark that we had an RTO while in recovery
7708 * and save the ssthresh so if we go back
7709 * into recovery we will have a chance
7710 * to slow-start back to that level.
7712 rack->rto_from_rec = 1;
7713 rack->r_ctl.rto_ssthresh = tp->snd_ssthresh;
7715 if (IN_FASTRECOVERY(tp->t_flags))
7716 tp->t_flags |= TF_WASFRECOVERY;
7717 else
7718 tp->t_flags &= ~TF_WASFRECOVERY;
7719 if (IN_CONGRECOVERY(tp->t_flags))
7720 tp->t_flags |= TF_WASCRECOVERY;
7721 else
7722 tp->t_flags &= ~TF_WASCRECOVERY;
7723 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
7724 (tp->snd_una == tp->snd_max)) {
7725 /* Nothing outstanding .. nothing to do */
7726 return (0);
7728 if (rack->r_ctl.dsack_persist) {
7729 rack->r_ctl.dsack_persist--;
7730 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
7731 rack->r_ctl.num_dsack = 0;
7733 rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
7736 * Rack can only run one timer at a time, so we cannot
7737 * run a KEEPINIT (gating SYN sending) and a retransmit
7738 * timer for the SYN. So if we are in a front state and
7739 * have a KEEPINIT timer we need to check the first transmit
7740 * against now to see if we have exceeded the KEEPINIT time
7741 * (if one is set).
7743 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
7744 (TP_KEEPINIT(tp) != 0)) {
7745 struct rack_sendmap *rsm;
7747 rsm = tqhash_min(rack->r_ctl.tqh);
7748 if (rsm) {
7749 /* Ok we have something outstanding to test keepinit with */
7750 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) &&
7751 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) {
7752 /* We have exceeded the KEEPINIT time */
7753 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
7754 goto drop_it;
7759 * Retransmission timer went off. Message has not been acked within
7760 * retransmit interval. Back off to a longer retransmit interval
7761 * and retransmit one segment.
7763 if ((rack->r_ctl.rc_resend == NULL) ||
7764 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) {
7766 * If the rwnd collapsed on
7767 * the one we are retransmitting
7768 * it does not count against the
7769 * rxt count.
7771 tp->t_rxtshift++;
7773 rack_remxt_tmr(tp);
7774 if (tp->t_rxtshift > V_tcp_retries) {
7775 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
7776 drop_it:
7777 tp->t_rxtshift = V_tcp_retries;
7778 KMOD_TCPSTAT_INC(tcps_timeoutdrop);
7779 /* XXXGL: previously t_softerror was casted to uint16_t */
7780 MPASS(tp->t_softerror >= 0);
7781 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT;
7782 goto out; /* tcp_drop() */
7784 if (tp->t_state == TCPS_SYN_SENT) {
7786 * If the SYN was retransmitted, indicate CWND to be limited
7787 * to 1 segment in cc_conn_init().
7789 tp->snd_cwnd = 1;
7790 } else if (tp->t_rxtshift == 1) {
7792 * first retransmit; record ssthresh and cwnd so they can be
7793 * recovered if this turns out to be a "bad" retransmit. A
7794 * retransmit is considered "bad" if an ACK for this segment
7795 * is received within RTT/2 interval; the assumption here is
7796 * that the ACK was already in flight. See "On Estimating
7797 * End-to-End Network Path Properties" by Allman and Paxson
7798 * for more details.
7800 tp->snd_cwnd_prev = tp->snd_cwnd;
7801 tp->snd_ssthresh_prev = tp->snd_ssthresh;
7802 tp->snd_recover_prev = tp->snd_recover;
7803 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2);
7804 tp->t_flags |= TF_PREVVALID;
7805 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
7806 tp->t_flags &= ~TF_PREVVALID;
7807 KMOD_TCPSTAT_INC(tcps_rexmttimeo);
7808 if ((tp->t_state == TCPS_SYN_SENT) ||
7809 (tp->t_state == TCPS_SYN_RECEIVED))
7810 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift];
7811 else
7812 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift];
7814 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt,
7815 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop);
7817 * We enter the path for PLMTUD if the connection is established or
7818 * in FIN_WAIT_1 state; the reason for the latter is that if the
7819 * amount of data we send is very small, we could send it in a couple
7820 * of packets and proceed straight to FIN. In that case we won't
7821 * catch the ESTABLISHED state.
7823 #ifdef INET6
7824 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false;
7825 #else
7826 isipv6 = false;
7827 #endif
7828 if (((V_tcp_pmtud_blackhole_detect == 1) ||
7829 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
7830 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
7831 ((tp->t_state == TCPS_ESTABLISHED) ||
7832 (tp->t_state == TCPS_FIN_WAIT_1))) {
7834 * The idea here is that each stage of the mtu probe (usually,
7835 * 1448 -> 1188 -> 524) should be given 2 chances to recover
7836 * before further clamping down. 'tp->t_rxtshift % 2 == 0'
7837 * should take care of that.
7839 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
7840 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) &&
7841 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
7842 tp->t_rxtshift % 2 == 0)) {
7844 * Enter Path MTU Black-hole Detection mechanism: -
7845 * Disable Path MTU Discovery (IP "DF" bit). -
7846 * Reduce MTU to lower value than what we negotiated
7847 * with peer.
7849 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
7850 /* Record that we may have found a black hole. */
7851 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
7852 /* Keep track of previous MSS. */
7853 tp->t_pmtud_saved_maxseg = tp->t_maxseg;
7857 * Reduce the MSS to blackhole value or to the
7858 * default in an attempt to retransmit.
7860 #ifdef INET6
7861 if (isipv6 &&
7862 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
7863 /* Use the sysctl tuneable blackhole MSS. */
7864 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
7865 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
7866 } else if (isipv6) {
7867 /* Use the default MSS. */
7868 tp->t_maxseg = V_tcp_v6mssdflt;
7870 * Disable Path MTU Discovery when we switch
7871 * to minmss.
7873 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
7874 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
7876 #endif
7877 #if defined(INET6) && defined(INET)
7878 else
7879 #endif
7880 #ifdef INET
7881 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
7882 /* Use the sysctl tuneable blackhole MSS. */
7883 tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
7884 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
7885 } else {
7886 /* Use the default MSS. */
7887 tp->t_maxseg = V_tcp_mssdflt;
7889 * Disable Path MTU Discovery when we switch
7890 * to minmss.
7892 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
7893 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
7895 #endif
7896 } else {
7898 * If further retransmissions are still unsuccessful
7899 * with a lowered MTU, maybe this isn't a blackhole
7900 * and we restore the previous MSS and blackhole
7901 * detection flags. The limit '6' is determined by
7902 * giving each probe stage (1448, 1188, 524) 2
7903 * chances to recover.
7905 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
7906 (tp->t_rxtshift >= 6)) {
7907 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
7908 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
7909 tp->t_maxseg = tp->t_pmtud_saved_maxseg;
7910 if (tp->t_maxseg < V_tcp_mssdflt) {
7912 * The MSS is so small we should not
7913 * process incoming SACK's since we are
7914 * subject to attack in such a case.
7916 tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT;
7917 } else {
7918 tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT;
7920 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed);
7925 * Disable RFC1323 and SACK if we haven't got any response to
7926 * our third SYN to work-around some broken terminal servers
7927 * (most of which have hopefully been retired) that have bad VJ
7928 * header compression code which trashes TCP segments containing
7929 * unknown-to-them TCP options.
7931 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
7932 (tp->t_rxtshift == 3))
7933 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
7935 * If we backed off this far, our srtt estimate is probably bogus.
7936 * Clobber it so we'll take the next rtt measurement as our srtt;
7937 * move the current srtt into rttvar to keep the current retransmit
7938 * times until then.
7940 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
7941 #ifdef INET6
7942 if ((inp->inp_vflag & INP_IPV6) != 0)
7943 in6_losing(inp);
7944 else
7945 #endif
7946 in_losing(inp);
7947 tp->t_rttvar += tp->t_srtt;
7948 tp->t_srtt = 0;
7950 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
7951 tp->snd_recover = tp->snd_max;
7952 tp->t_flags |= TF_ACKNOW;
7953 tp->t_rtttime = 0;
7954 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__);
7955 out:
7956 return (retval);
7959 static int
7960 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp)
7962 int32_t ret = 0;
7963 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
7965 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
7966 (tp->t_flags & TF_GPUTINPROG)) {
7968 * We have a goodput in progress
7969 * and we have entered a late state.
7970 * Do we have enough data in the sb
7971 * to handle the GPUT request?
7973 uint32_t bytes;
7975 bytes = tp->gput_ack - tp->gput_seq;
7976 if (SEQ_GT(tp->gput_seq, tp->snd_una))
7977 bytes += tp->gput_seq - tp->snd_una;
7978 if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
7980 * There are not enough bytes in the socket
7981 * buffer that have been sent to cover this
7982 * measurement. Cancel it.
7984 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
7985 rack->r_ctl.rc_gp_srtt /*flex1*/,
7986 tp->gput_seq,
7987 0, 0, 18, __LINE__, NULL, 0);
7988 tp->t_flags &= ~TF_GPUTINPROG;
7991 if (timers == 0) {
7992 return (0);
7994 if (tp->t_state == TCPS_LISTEN) {
7995 /* no timers on listen sockets */
7996 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
7997 return (0);
7998 return (1);
8000 if ((timers & PACE_TMR_RACK) &&
8001 rack->rc_on_min_to) {
8003 * For the rack timer when we
8004 * are on a min-timeout (which means rrr_conf = 3)
8005 * we don't want to check the timer. It may
8006 * be going off for a pace and that's ok; we
8007 * want to send the retransmit (if it's ready).
8009 * If it's on a normal rack timer (non-min) then
8010 * we will check if it has expired.
8012 goto skip_time_check;
8014 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
8015 uint32_t left;
8017 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
8018 ret = -1;
8019 rack_log_to_processing(rack, cts, ret, 0);
8020 return (0);
8022 if (hpts_calling == 0) {
8024 * A user send or queued mbuf (sack) has called us? We
8025 * return 0 and let the pacing guards
8026 * deal with it if they should or
8027 * should not cause a send.
8029 ret = -2;
8030 rack_log_to_processing(rack, cts, ret, 0);
8031 return (0);
8034 * Ok our timer went off early and we are not paced; false
8035 * alarm, go back to sleep. We make sure we don't have
8036 * no-sack wakeup on since we no longer have a PKT_OUTPUT
8037 * flag in place.
8039 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE;
8040 ret = -3;
8041 left = rack->r_ctl.rc_timer_exp - cts;
8042 tcp_hpts_insert(tp, HPTS_MS_TO_SLOTS(left));
8043 rack_log_to_processing(rack, cts, ret, left);
8044 return (1);
8046 skip_time_check:
8047 rack->rc_tmr_stopped = 0;
8048 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
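/*
 * Dispatch the armed timer. Only one should be set at a time; the
 * RACK, TLP and RXT cases stamp rc_tlp_rxt_last_time so later timer
 * calculations can discount the time already waited.
 */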
8049 if (timers & PACE_TMR_DELACK) {
8050 ret = rack_timeout_delack(tp, rack, cts);
8051 } else if (timers & PACE_TMR_RACK) {
8052 rack->r_ctl.rc_tlp_rxt_last_time = cts;
8053 rack->r_fast_output = 0;
8054 ret = rack_timeout_rack(tp, rack, cts);
8055 } else if (timers & PACE_TMR_TLP) {
8056 rack->r_ctl.rc_tlp_rxt_last_time = cts;
8057 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp);
8058 } else if (timers & PACE_TMR_RXT) {
8059 rack->r_ctl.rc_tlp_rxt_last_time = cts;
8060 rack->r_fast_output = 0;
8061 ret = rack_timeout_rxt(tp, rack, cts);
8062 } else if (timers & PACE_TMR_PERSIT) {
8063 ret = rack_timeout_persist(tp, rack, cts);
8064 } else if (timers & PACE_TMR_KEEP) {
8065 ret = rack_timeout_keepalive(tp, rack, cts);
8067 rack_log_to_processing(rack, cts, ret, timers);
8068 return (ret);
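/*
 * Cancel a pending pacing slot and/or timer: remember which timers
 * were stopped in rc_tmr_stopped (so they can be restarted with any
 * residual time) and pull the connection off the hpts wheel when no
 * paced output remains.
 */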
8071 static void
8072 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line)
8074 struct timeval tv;
8075 uint32_t us_cts, flags_on_entry;
8076 uint8_t hpts_removed = 0;
8078 flags_on_entry = rack->r_ctl.rc_hpts_flags;
8079 us_cts = tcp_get_usecs(&tv);
8080 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
8081 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) ||
8082 ((tp->snd_max - tp->snd_una) == 0))) {
8083 tcp_hpts_remove(rack->rc_tp);
8084 hpts_removed = 1;
8085 /* If we were not delayed, cancel out the flag. */
8086 if ((tp->snd_max - tp->snd_una) == 0)
8087 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
8088 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
8090 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
8091 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
8092 if (tcp_in_hpts(rack->rc_tp) &&
8093 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) {
8095 * Canceling timers when we have no output being
8096 * paced. We also must remove ourselves from the
8097 * hpts.
8099 tcp_hpts_remove(rack->rc_tp);
8100 hpts_removed = 1;
8102 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
8104 if (hpts_removed == 0)
8105 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
8108 static int
8109 rack_stopall(struct tcpcb *tp)
8111 struct tcp_rack *rack;
8113 rack = (struct tcp_rack *)tp->t_fb_ptr;
8114 rack->t_timers_stopped = 1;
8116 tcp_hpts_remove(tp);
8118 return (0);
8121 static void
8122 rack_stop_all_timers(struct tcpcb *tp, struct tcp_rack *rack)
8125 * Ensure no timers are running.
8127 if (tcp_timer_active(tp, TT_PERSIST)) {
8128 /* We enter in persists, set the flag appropriately */
8129 rack->rc_in_persist = 1;
8131 if (tcp_in_hpts(rack->rc_tp)) {
8132 tcp_hpts_remove(rack->rc_tp);
8136 static void
8137 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
8138 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz)
8140 int32_t idx;
8142 rsm->r_rtr_cnt++;
8143 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
8144 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
8145 rsm->r_flags |= RACK_OVERMAX;
8147 rsm->r_act_rxt_cnt++;
8148 /* Peg the count/index */
8149 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8150 rsm->r_dupack = 0;
8151 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) {
8152 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
8153 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
8155 if (rsm->r_flags & RACK_WAS_LOST) {
8157 * We retransmitted it, putting it back in flight;
8158 * remove the lost designation and reduce the
8159 * bytes considered lost.
8161 rsm->r_flags &= ~RACK_WAS_LOST;
8162 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)),
8163 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
8164 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start))
8165 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start;
8166 else
8167 rack->r_ctl.rc_considered_lost = 0;
8169 idx = rsm->r_rtr_cnt - 1;
8170 rsm->r_tim_lastsent[idx] = ts;
8172 * Here we don't add in the len of the send, since it's already
8173 * in snd_una <-> snd_max.
8175 rsm->r_fas = ctf_flight_size(rack->rc_tp,
8176 rack->r_ctl.rc_sacked);
8177 if (rsm->r_flags & RACK_ACKED) {
8178 /* Probably MTU discovery messing with us */
8179 rsm->r_flags &= ~RACK_ACKED;
8180 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
8182 if (rsm->r_in_tmap) {
8183 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8184 rsm->r_in_tmap = 0;
8186 /* Let's make sure it really is in (or not in) the GP window */
8187 rack_mark_in_gp_win(tp, rsm);
8188 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8189 rsm->r_in_tmap = 1;
8190 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz);
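/*
 * Illustrative example of the r_bas (segments-at-send) calculation
 * above: with (r_end - r_start) = 3000 bytes and segsiz = 1448 the
 * ceiling division gives (3000 + 1447) / 1448 = 3 segments.
 */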
8191 /* Take off the must-retransmit flag, if it's on */
8192 if (rsm->r_flags & RACK_MUST_RXT) {
8193 if (rack->r_must_retran)
8194 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
8195 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
8197 * We have retransmitted all we need. Clear
8198 * any must retransmit flags.
8200 rack->r_must_retran = 0;
8201 rack->r_ctl.rc_out_at_rto = 0;
8203 rsm->r_flags &= ~RACK_MUST_RXT;
8205 /* Remove any collapsed flag */
8206 rsm->r_flags &= ~RACK_RWND_COLLAPSED;
8207 if (rsm->r_flags & RACK_SACK_PASSED) {
8208 /* We have retransmitted due to the SACK pass */
8209 rsm->r_flags &= ~RACK_SACK_PASSED;
8210 rsm->r_flags |= RACK_WAS_SACKPASS;
8214 static uint32_t
8215 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
8216 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint32_t add_flag, int segsiz)
8219 * We (re-)transmitted starting at rsm->r_start for some length
8220 * (possibly less than r_end).
8222 struct rack_sendmap *nrsm;
8223 int insret __diagused;
8224 uint32_t c_end;
8225 int32_t len;
8227 len = *lenp;
8228 c_end = rsm->r_start + len;
8229 if (SEQ_GEQ(c_end, rsm->r_end)) {
8231 * We retransmitted the whole piece, or more than the whole,
8232 * slopping into the next rsm.
8234 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz);
8235 if (c_end == rsm->r_end) {
8236 *lenp = 0;
8237 return (0);
8238 } else {
8239 int32_t act_len;
8241 /* Hangs over the end; return what's left */
8242 act_len = rsm->r_end - rsm->r_start;
8243 *lenp = (len - act_len);
8244 return (rsm->r_end);
8246 /* We don't get out of this block. */
8249 * Here we retransmitted less than the whole thing which means we
8250 * have to split this into what was transmitted and what was not.
8252 nrsm = rack_alloc_full_limit(rack);
8253 if (nrsm == NULL) {
8255 * We can't get memory, so let's not proceed.
8257 *lenp = 0;
8258 return (0);
8261 * So here we are going to take the original rsm and make it what we
8262 * retransmitted. nrsm will be the tail portion we did not
8263 * retransmit. For example, say the chunk was 1, 11 (10 bytes), and
8264 * we retransmitted 5 bytes, i.e. 1, 5. The original piece shrinks to
8265 * 1, 6 and the new piece will be 6, 11.
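/*
 * Illustrative walk-through of the split below (hypothetical
 * numbers): r_start = 1, r_end = 11, *lenp = 5 gives c_end = 6,
 * which is less than r_end, so rack_clone_rsm() leaves rsm as
 * [1, 6) (the retransmitted part) and nrsm becomes [6, 11).
 */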
8267 rack_clone_rsm(rack, nrsm, rsm, c_end);
8268 nrsm->r_dupack = 0;
8269 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
8270 #ifndef INVARIANTS
8271 (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
8272 #else
8273 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
8274 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
8275 nrsm, insret, rack, rsm);
8277 #endif
8278 if (rsm->r_in_tmap) {
8279 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8280 nrsm->r_in_tmap = 1;
8282 rsm->r_flags &= (~RACK_HAS_FIN);
8283 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz);
8284 /* Log a split of rsm into rsm and nrsm */
8285 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
8286 *lenp = 0;
8287 return (0);
8290 static void
8291 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
8292 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts,
8293 struct rack_sendmap *hintrsm, uint32_t add_flag, struct mbuf *s_mb,
8294 uint32_t s_moff, int hw_tls, int segsiz)
8296 struct tcp_rack *rack;
8297 struct rack_sendmap *rsm, *nrsm;
8298 int insret __diagused;
8300 register uint32_t snd_max, snd_una;
8303 * Add to the RACK log of packets in flight or retransmitted. If
8304 * there is a TS option we will use the TS echoed, if not we will
8305 * grab a TS.
8307 * Retransmissions will increment the count and move the ts to its
8308 * proper place. Note that if options do not include TS's then we
8309 * won't be able to effectively use the ACK for an RTT on a retran.
8311 * Notes about r_start and r_end. Let's consider a send starting at
8312 * sequence 1 for 10 bytes. In such an example the r_start would be
8313 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
8314 * This means that r_end is actually the first sequence for the next
8315 * slot (11).
8319 * If err is set what do we do XXXrrs? should we not add the thing?
8320 * -- i.e. return if err != 0 or should we pretend we sent it? --
8321 * i.e. proceed with add ** do this for now.
8323 INP_WLOCK_ASSERT(tptoinpcb(tp));
8324 if (err)
8326 * We don't log errors -- we could but snd_max does not
8327 * advance in this case either.
8329 return;
8331 if (th_flags & TH_RST) {
8333 * We don't log resets and we return immediately from
8334 * sending
8336 return;
8338 rack = (struct tcp_rack *)tp->t_fb_ptr;
8339 snd_una = tp->snd_una;
8340 snd_max = tp->snd_max;
8341 if (th_flags & (TH_SYN | TH_FIN)) {
8343 * The call to rack_log_output is made before bumping
8344 * snd_max. This means we can record one extra byte on a SYN
8345 * or FIN if seq_out is adding more on and a FIN is present
8346 * (and we are not resending).
8348 if ((th_flags & TH_SYN) && (seq_out == tp->iss))
8349 len++;
8350 if (th_flags & TH_FIN)
8351 len++;
8353 if (SEQ_LEQ((seq_out + len), snd_una)) {
8354 /* Are we sending an old segment to induce an ack (keep-alive)? */
8355 return;
8357 if (SEQ_LT(seq_out, snd_una)) {
8358 /* huh? should we panic? */
8359 uint32_t end;
8361 end = seq_out + len;
8362 seq_out = snd_una;
8363 if (SEQ_GEQ(end, seq_out))
8364 len = end - seq_out;
8365 else
8366 len = 0;
8368 if (len == 0) {
8369 /* We don't log zero window probes */
8370 return;
8372 if (IN_FASTRECOVERY(tp->t_flags)) {
8373 rack->r_ctl.rc_prr_out += len;
8375 /* First question is it a retransmission or new? */
8376 if (seq_out == snd_max) {
8377 /* It's new */
8378 rack_chk_req_and_hybrid_on_out(rack, seq_out, len, cts);
8379 again:
8380 rsm = rack_alloc(rack);
8381 if (rsm == NULL) {
8383 * Hmm out of memory and the tcb got destroyed while
8384 * we tried to wait.
8386 return;
8388 if (th_flags & TH_FIN) {
8389 rsm->r_flags = RACK_HAS_FIN|add_flag;
8390 } else {
8391 rsm->r_flags = add_flag;
8393 if (hw_tls)
8394 rsm->r_hw_tls = 1;
8395 rsm->r_tim_lastsent[0] = cts;
8396 rsm->r_rtr_cnt = 1;
8397 rsm->r_act_rxt_cnt = 0;
8398 rsm->r_rtr_bytes = 0;
8399 if (th_flags & TH_SYN) {
8400 /* The data space is one beyond snd_una */
8401 rsm->r_flags |= RACK_HAS_SYN;
8403 rsm->r_start = seq_out;
8404 rsm->r_end = rsm->r_start + len;
8405 rack_mark_in_gp_win(tp, rsm);
8406 rsm->r_dupack = 0;
8408 * save off the mbuf location that
8409 * sndmbuf_noadv returned (which is
8410 * where we started copying from)..
8412 rsm->m = s_mb;
8413 rsm->soff = s_moff;
8415 * Here we do add in the len of the send, since it's not yet
8416 * reflected in snd_una <-> snd_max.
8418 rsm->r_fas = (ctf_flight_size(rack->rc_tp,
8419 rack->r_ctl.rc_sacked) +
8420 (rsm->r_end - rsm->r_start));
8421 if ((rack->rc_initial_ss_comp == 0) &&
8422 (rack->r_ctl.ss_hi_fs < rsm->r_fas)) {
8423 rack->r_ctl.ss_hi_fs = rsm->r_fas;
8425 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */
8426 if (rsm->m) {
8427 if (rsm->m->m_len <= rsm->soff) {
8429 * XXXrrs Question, will this happen?
8431 * If sbsndptr is set at the correct place
8432 * then s_moff should always be somewhere
8433 * within rsm->m. But if the sbsndptr was
8434 * off then that won't be true. If it occurs
8435 * we need to walk out to the correct location.
8437 struct mbuf *lm;
8439 lm = rsm->m;
8440 while (lm->m_len <= rsm->soff) {
8441 rsm->soff -= lm->m_len;
8442 lm = lm->m_next;
8443 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u",
8444 __func__, rack, s_moff, s_mb, rsm->soff));
8446 rsm->m = lm;
8448 rsm->orig_m_len = rsm->m->m_len;
8449 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
8450 } else {
8451 rsm->orig_m_len = 0;
8452 rsm->orig_t_space = 0;
8454 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz);
8455 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8456 /* Log a new rsm */
8457 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__);
8458 #ifndef INVARIANTS
8459 (void)tqhash_insert(rack->r_ctl.tqh, rsm);
8460 #else
8461 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) {
8462 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
8463 nrsm, insret, rack, rsm);
8465 #endif
8466 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8467 rsm->r_in_tmap = 1;
8468 if (rsm->r_flags & RACK_IS_PCM) {
8469 rack->r_ctl.pcm_i.send_time = cts;
8470 rack->r_ctl.pcm_i.eseq = rsm->r_end;
8471 /* First time through we set the start too */
8472 if (rack->pcm_in_progress == 0)
8473 rack->r_ctl.pcm_i.sseq = rsm->r_start;
8476 * Special case detection, is there just a single
8477 * packet outstanding when we are not in recovery?
8479 * If this is true mark it so.
8481 if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
8482 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) {
8483 struct rack_sendmap *prsm;
8485 prsm = tqhash_prev(rack->r_ctl.tqh, rsm);
8486 if (prsm)
8487 prsm->r_one_out_nr = 1;
8489 return;
8492 * If we reach here it's a retransmission and we need to find it.
8494 more:
8495 if (hintrsm && (hintrsm->r_start == seq_out)) {
8496 rsm = hintrsm;
8497 hintrsm = NULL;
8498 } else {
8499 /* No hints sorry */
8500 rsm = NULL;
8502 if ((rsm) && (rsm->r_start == seq_out)) {
8503 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz);
8504 if (len == 0) {
8505 return;
8506 } else {
8507 goto more;
8510 /* Ok, it was not the last pointer; go through it the hard way. */
8511 refind:
8512 rsm = tqhash_find(rack->r_ctl.tqh, seq_out);
8513 if (rsm) {
8514 if (rsm->r_start == seq_out) {
8515 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz);
8516 if (len == 0) {
8517 return;
8518 } else {
8519 goto refind;
8522 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
8523 /* Transmitted within this piece */
8525 * Ok we must split off the front and then let the
8526 * update do the rest
8528 nrsm = rack_alloc_full_limit(rack);
8529 if (nrsm == NULL) {
8530 rack_update_rsm(tp, rack, rsm, cts, add_flag, segsiz);
8531 return;
8534 * copy rsm to nrsm and then trim the front of rsm
8535 * to not include this part.
8537 rack_clone_rsm(rack, nrsm, rsm, seq_out);
8538 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
8539 #ifndef INVARIANTS
8540 (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
8541 #else
8542 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
8543 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
8544 nrsm, insret, rack, rsm);
8546 #endif
8547 if (rsm->r_in_tmap) {
8548 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8549 nrsm->r_in_tmap = 1;
8551 rsm->r_flags &= (~RACK_HAS_FIN);
8552 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag, segsiz);
8553 if (len == 0) {
8554 return;
8555 } else if (len > 0)
8556 goto refind;
8560 * Hmm, not found in the map; did they retransmit both old data and on into
8561 * the new?
8563 if (seq_out == tp->snd_max) {
8564 goto again;
8565 } else if (SEQ_LT(seq_out, tp->snd_max)) {
8566 #ifdef INVARIANTS
8567 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
8568 seq_out, len, tp->snd_una, tp->snd_max);
8569 printf("Starting Dump of all rack entries\n");
8570 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) {
8571 printf("rsm:%p start:%u end:%u\n",
8572 rsm, rsm->r_start, rsm->r_end);
8574 printf("Dump complete\n");
8575 panic("seq_out not found rack:%p tp:%p",
8576 rack, tp);
8577 #endif
8578 } else {
8579 #ifdef INVARIANTS
8581 * Hmm beyond sndmax? (only if we are using the new rtt-pack
8582 * flag)
8584 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p",
8585 seq_out, len, tp->snd_max, tp);
8586 #endif
8591 * Record one of the RTT updates from an ack into
8592 * our sample structure.
8595 static void
8596 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
8597 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
8599 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
8600 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
8601 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
8603 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
8604 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
8605 rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
8607 if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
8608 if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
8609 rack->r_ctl.rc_gp_lowrtt = us_rtt;
8610 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
8611 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
8613 if ((confidence == 1) &&
8614 ((rsm == NULL) ||
8615 (rsm->r_just_ret) ||
8616 (rsm->r_one_out_nr &&
8617 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
8619 * If the rsm had a just-return
8620 * hit on it, then we can't trust the
8621 * rtt measurement for buffer determination.
8622 * Note that a confidence of 2 indicates
8623 * SACK'd, which overrides the r_just_ret or
8624 * the r_one_out_nr. If it was a CUM-ACK and
8625 * we had only two outstanding, but get an
8626 * ack for only 1, then that also lowers our
8627 * confidence.
8629 confidence = 0;
8631 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
8632 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
8633 if (rack->r_ctl.rack_rs.confidence == 0) {
8635 * We take anything with no current confidence
8636 * saved.
8638 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
8639 rack->r_ctl.rack_rs.confidence = confidence;
8640 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
8641 } else if (confidence != 0) {
8643 * Once we have a confident number,
8644 * we can update it with a smaller
8645 * value since this confident number
8646 * may include the DSACK time until
8647 * the next segment (the second one) arrived.
8649 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
8650 rack->r_ctl.rack_rs.confidence = confidence;
8651 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
8654 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
8655 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
8656 rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
8657 rack->r_ctl.rack_rs.rs_rtt_cnt++;
8661 * Collect new round-trip time estimate
8662 * and update averages and current timeout.
8664 static void
8665 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
8667 int32_t delta;
8668 int32_t rtt;
8670 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY)
8671 /* No valid sample */
8672 return;
8673 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) {
8674 /* We are to use the lowest RTT seen in a single ack */
8675 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
8676 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) {
8677 /* We are to use the highest RTT seen in a single ack */
8678 rtt = rack->r_ctl.rack_rs.rs_rtt_highest;
8679 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) {
8680 /* We are to use the average RTT seen in a single ack */
8681 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot /
8682 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt);
8683 } else {
8684 #ifdef INVARIANTS
8685 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
8686 #endif
8687 return;
8689 if (rtt == 0)
8690 rtt = 1;
8691 if (rack->rc_gp_rtt_set == 0) {
8693 * With no RTT we have to accept
8694 * even one we are not confident of.
8696 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt;
8697 rack->rc_gp_rtt_set = 1;
8698 } else if (rack->r_ctl.rack_rs.confidence) {
8699 /* update the running gp srtt */
8700 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8);
8701 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8;
8703 if (rack->r_ctl.rack_rs.confidence) {
8705 * Record the low and high for the highly-buffered-path computation;
8706 * we only do this if we are confident (not a retransmission).
8708 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) {
8709 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
8711 if (rack->rc_highly_buffered == 0) {
8713 * Currently, once we declare a path as
8714 * highly buffered there is no going
8715 * back, which may be a problem...
8717 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) {
8718 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt,
8719 rack->r_ctl.rc_highest_us_rtt,
8720 rack->r_ctl.rc_lowest_us_rtt,
8721 RACK_RTTS_SEEHBP);
8722 rack->rc_highly_buffered = 1;
8726 if ((rack->r_ctl.rack_rs.confidence) ||
8727 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) {
8729 * If we are highly confident of it <or> it was
8730 * never retransmitted we accept it as the last us_rtt.
8732 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
8733 /* The lowest rtt can be set if it was not retransmitted */
8734 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) {
8735 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
8736 if (rack->r_ctl.rc_lowest_us_rtt == 0)
8737 rack->r_ctl.rc_lowest_us_rtt = 1;
8740 rack = (struct tcp_rack *)tp->t_fb_ptr;
8741 if (tp->t_srtt != 0) {
8743 * We keep a simple srtt in microseconds, like our rtt
8744 * measurement. We don't need to do any tricks with shifting
8745 * etc. Instead we just add in 1/8th of the new measurement
8746 * and subtract out 1/8 of the old srtt. We do the same with
8747 * the variance after finding the absolute value of the
8748 * difference between this sample and the current srtt.
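/*
 * Worked example (illustrative numbers): with t_srtt = 10000 usec,
 * t_rttvar = 2000 usec and a new rtt sample of 12000 usec:
 *   delta    = 10000 - 12000 = -2000 (|delta| = 2000)
 *   t_srtt   -> 10000 - (10000 >> 3) + (12000 >> 3) = 10250
 *   t_rttvar -> 2000 - (2000 >> 3) + (2000 >> 3)    = 2000
 */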
8750 delta = tp->t_srtt - rtt;
8751 /* Take off 1/8th of the current sRTT */
8752 tp->t_srtt -= (tp->t_srtt >> 3);
8753 /* Add in 1/8th of the new RTT just measured */
8754 tp->t_srtt += (rtt >> 3);
8755 if (tp->t_srtt <= 0)
8756 tp->t_srtt = 1;
8757 /* Now let's take the absolute value of the delta */
8758 if (delta < 0)
8759 delta = -delta;
8760 /* Subtract out 1/8th */
8761 tp->t_rttvar -= (tp->t_rttvar >> 3);
8762 /* Add in 1/8th of the new variance we just saw */
8763 tp->t_rttvar += (delta >> 3);
8764 if (tp->t_rttvar <= 0)
8765 tp->t_rttvar = 1;
8766 } else {
8768 * No rtt measurement yet - use the unsmoothed rtt. Set the
8769 * variance to half the rtt (so our first retransmit happens
8770 * at 3*rtt).
8772 tp->t_srtt = rtt;
8773 tp->t_rttvar = rtt >> 1;
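/*
 * Illustrative example for the first sample: rtt = 100000 usec gives
 * t_srtt = 100000 and t_rttvar = 50000, so srtt + 4 * rttvar =
 * 300000 usec, i.e. the first retransmit fires at roughly 3 * rtt.
 */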
8775 rack->rc_srtt_measure_made = 1;
8776 KMOD_TCPSTAT_INC(tcps_rttupdated);
8777 if (tp->t_rttupdated < UCHAR_MAX)
8778 tp->t_rttupdated++;
8779 #ifdef STATS
8780 if (rack_stats_gets_ms_rtt == 0) {
8781 /* Send in the microsecond rtt used for rxt timeout purposes */
8782 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt));
8783 } else if (rack_stats_gets_ms_rtt == 1) {
8784 /* Send in the millisecond rtt used for rxt timeout purposes */
8785 int32_t ms_rtt;
8787 /* Round up */
8788 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
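/*
 * Illustrative: assuming HPTS_USEC_IN_MSEC is 1000, an rtt of
 * 1500 usec rounds up to ms_rtt = (1500 + 999) / 1000 = 2.
 */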
8789 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
8790 } else if (rack_stats_gets_ms_rtt == 2) {
8791 /* Send in the millisecond rtt as close to the path RTT as we can get */
8792 int32_t ms_rtt;
8794 /* Round up */
8795 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
8796 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
8797 } else {
8798 /* Send in the microsecond rtt as close to the path RTT as we can get */
8799 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt));
8801 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt));
8802 #endif
8803 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
8805 * the retransmit should happen at rtt + 4 * rttvar. Because of the
8806 * way we do the smoothing, srtt and rttvar will each average +1/2
8807 * tick of bias. When we compute the retransmit timer, we want 1/2
8808 * tick of rounding and 1 extra tick because of +-1/2 tick
8809 * uncertainty in the firing of the timer. The bias will give us
8810 * exactly the 1.5 tick we need. But, because the bias is
8811 * statistical, we have to test that we don't drop below the minimum
8812 * feasible timer (which is 2 ticks).
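/*
 * Illustrative sketch of the clamping below (hypothetical values):
 * if the rexmt value derived from srtt/rttvar were 18250 usec, it is
 * bounded below by max(rack_rto_min, rtt + 2) and above by
 * rack_rto_max before being stored in t_rxtcur.
 */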
8814 tp->t_rxtshift = 0;
8815 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
8816 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop);
8817 rack_log_rtt_sample(rack, rtt);
8818 tp->t_softerror = 0;
8822 static void
8823 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts)
8826 * Apply the inbound us-rtt at us_cts to the filter.
8828 uint32_t old_rtt;
8830 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
8831 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt,
8832 us_rtt, us_cts);
8833 if (old_rtt > us_rtt) {
8834 /* We just hit a new lower rtt time */
8835 rack_log_rtt_shrinks(rack, us_cts, old_rtt,
8836 __LINE__, RACK_RTTS_NEWRTT);
8838 * Only count it if it's lower than what we saw within our
8839 * calculated range.
8841 if ((old_rtt - us_rtt) > rack_min_rtt_movement) {
8842 if (rack_probertt_lower_within &&
8843 rack->rc_gp_dyn_mul &&
8844 (rack->use_fixed_rate == 0) &&
8845 (rack->rc_always_pace)) {
8847 * We are seeing a new lower rtt very close
8848 * to the time that we would have entered probe-rtt.
8849 * This is probably due to the fact that a peer flow
8850 * has entered probe-rtt. Let's go in now too.
8852 uint32_t val;
8854 val = rack_probertt_lower_within * rack_time_between_probertt;
8855 val /= 100;
8856 if ((rack->in_probe_rtt == 0) &&
8857 (rack->rc_skip_timely == 0) &&
8858 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) {
8859 rack_enter_probertt(rack, us_cts);
8862 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
8867 static int
8868 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
8869 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack)
8871 uint32_t us_rtt;
8872 int32_t i, all;
8873 uint32_t t, len_acked;
8875 if ((rsm->r_flags & RACK_ACKED) ||
8876 (rsm->r_flags & RACK_WAS_ACKED))
8877 /* Already done */
8878 return (0);
8879 if (rsm->r_no_rtt_allowed) {
8880 /* Not allowed */
8881 return (0);
8883 if (ack_type == CUM_ACKED) {
8884 if (SEQ_GT(th_ack, rsm->r_end)) {
8885 len_acked = rsm->r_end - rsm->r_start;
8886 all = 1;
8887 } else {
8888 len_acked = th_ack - rsm->r_start;
8889 all = 0;
8891 } else {
8892 len_acked = rsm->r_end - rsm->r_start;
8893 all = 0;
8895 if (rsm->r_rtr_cnt == 1) {
8897 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
8898 if ((int)t <= 0)
8899 t = 1;
8900 if (!tp->t_rttlow || tp->t_rttlow > t)
8901 tp->t_rttlow = t;
8902 if (!rack->r_ctl.rc_rack_min_rtt ||
8903 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
8904 rack->r_ctl.rc_rack_min_rtt = t;
8905 if (rack->r_ctl.rc_rack_min_rtt == 0) {
8906 rack->r_ctl.rc_rack_min_rtt = 1;
8909 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]))
8910 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8911 else
8912 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8913 if (us_rtt == 0)
8914 us_rtt = 1;
8915 if (CC_ALGO(tp)->rttsample != NULL) {
8916 /* Kick the RTT to the CC */
8917 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas);
8919 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
8920 if (ack_type == SACKED) {
8921 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1);
8922 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt);
8923 } else {
8925 * We need to set up what our confidence
8926 * is in this ack.
8928 * If the rsm was app limited and it is
8929 * less than a mss in length (the end
8930 * of the send) then we have a gap. If we
8931 * were app limited but say we were sending
8932 * multiple MSS's then we are more confident
8933 * in it.
8935 * When we are not app-limited then we see if
8936 * the rsm is being included in the current
8937 * measurement, we tell this by the app_limited_needs_set
8938 * flag.
8940 * Note that being cwnd blocked is not app limited;
8941 * also, the pacing delay between packets when we
8942 * are sending only 1 or 2 MSS's will show up
8943 * in the RTT. We probably need to examine this algorithm
8944 * a bit more and enhance it to account for the delay
8945 * between rsm's. We could do that by saving off the
8946 * pacing delay of each rsm (in an rsm) and then
8947 * factoring that in somehow though for now I am
8948 * not sure how :)
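/*
 * Summary of the calc_conf choice below (restating the code):
 *   app-limited rsm, fully consumed and <= 1 MSS acked -> 0
 *   app-limited rsm otherwise                          -> 1
 *   not app-limited, app_limited_needs_set == 0        -> 1
 *   otherwise                                          -> 0
 */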
8950 int calc_conf = 0;
8952 if (rsm->r_flags & RACK_APP_LIMITED) {
8953 if (all && (len_acked <= ctf_fixed_maxseg(tp)))
8954 calc_conf = 0;
8955 else
8956 calc_conf = 1;
8957 } else if (rack->app_limited_needs_set == 0) {
8958 calc_conf = 1;
8959 } else {
8960 calc_conf = 0;
8962 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2);
8963 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt,
8964 calc_conf, rsm, rsm->r_rtr_cnt);
8966 if ((rsm->r_flags & RACK_TLP) &&
8967 (!IN_FASTRECOVERY(tp->t_flags))) {
8968 /* Segment was a TLP and our retrans matched */
8969 if (rack->r_ctl.rc_tlp_cwnd_reduce) {
8970 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__);
8973 if ((rack->r_ctl.rc_rack_tmit_time == 0) ||
8974 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
8975 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) {
8976 /* New more recent rack_tmit_time */
8977 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
8978 if (rack->r_ctl.rc_rack_tmit_time == 0)
8979 rack->r_ctl.rc_rack_tmit_time = 1;
8980 rack->rc_rack_rtt = t;
8982 return (1);
8985 * We clear the soft/rxtshift since we got an ack.
8986 * There is no assurance we will call the commit() function
8987 * so we need to clear these to avoid incorrect handling.
8989 tp->t_rxtshift = 0;
8990 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
8991 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
8992 tp->t_softerror = 0;
8993 if (to && (to->to_flags & TOF_TS) &&
8994 (ack_type == CUM_ACKED) &&
8995 (to->to_tsecr) &&
8996 ((rsm->r_flags & RACK_OVERMAX) == 0)) {
8998 * Now which timestamp does it match? In this block the ACK
8999 * must be coming from a previous transmission.
9001 for (i = 0; i < rsm->r_rtr_cnt; i++) {
9002 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) {
9003 t = cts - (uint32_t)rsm->r_tim_lastsent[i];
9004 if ((int)t <= 0)
9005 t = 1;
9006 if (CC_ALGO(tp)->rttsample != NULL) {
9008 * Kick the RTT to the CC, here
9009 * we lie a bit in that we know the
9010 * retransmission is correct even though
9011 * we retransmitted. This is because
9012 * we match the timestamps.
9014 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i]))
9015 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i];
9016 else
9017 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i];
9018 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas);
9020 if ((i + 1) < rsm->r_rtr_cnt) {
9022 * The peer ack'd from our previous
9023 * transmission. We have a spurious
9024 * retransmission and thus we don't
9025 * want to update our rack_rtt.
9027 * Hmm should there be a CC revert here?
9030 return (0);
9032 if (!tp->t_rttlow || tp->t_rttlow > t)
9033 tp->t_rttlow = t;
9034 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
9035 rack->r_ctl.rc_rack_min_rtt = t;
9036 if (rack->r_ctl.rc_rack_min_rtt == 0) {
9037 rack->r_ctl.rc_rack_min_rtt = 1;
9040 if ((rack->r_ctl.rc_rack_tmit_time == 0) ||
9041 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
9042 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) {
9043 /* New more recent rack_tmit_time */
9044 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
9045 if (rack->r_ctl.rc_rack_tmit_time == 0)
9046 rack->r_ctl.rc_rack_tmit_time = 1;
9047 rack->rc_rack_rtt = t;
9049 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3);
9050 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm,
9051 rsm->r_rtr_cnt);
9052 return (1);
9055 /* If we are logging log out the sendmap */
9056 if (tcp_bblogging_on(rack->rc_tp)) {
9057 for (i = 0; i < rsm->r_rtr_cnt; i++) {
9058 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr);
9061 goto ts_not_found;
9062 } else {
9064 * Ok, it's a SACK block that we retransmitted, or a Windows
9065 * machine without timestamps. We can tell nothing from the
9066 * timestamp since either it's not there, or it is the time the peer last
9067 * received a segment that moved forward its cum-ack point.
9069 ts_not_found:
9070 i = rsm->r_rtr_cnt - 1;
9071 t = cts - (uint32_t)rsm->r_tim_lastsent[i];
9072 if ((int)t <= 0)
9073 t = 1;
9074 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
9076 * We retransmitted and the ack came back in less
9077 * than the smallest rtt we have observed. We most
9078 * likely did an improper retransmit as outlined in
9079 * 6.2 Step 2 point 2 in the rack-draft so we
9080 * don't want to update our rack_rtt. We in
9081 * theory (in future) might want to think about reverting our
9082 * cwnd state but we won't for now.
9084 return (0);
9085 } else if (rack->r_ctl.rc_rack_min_rtt) {
9087 * We retransmitted it and the retransmit did the
9088 * job.
9090 if (!rack->r_ctl.rc_rack_min_rtt ||
9091 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
9092 rack->r_ctl.rc_rack_min_rtt = t;
9093 if (rack->r_ctl.rc_rack_min_rtt == 0) {
9094 rack->r_ctl.rc_rack_min_rtt = 1;
9097 if ((rack->r_ctl.rc_rack_tmit_time == 0) ||
9098 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
9099 (uint32_t)rsm->r_tim_lastsent[i]))) {
9100 /* New more recent rack_tmit_time */
9101 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i];
9102 if (rack->r_ctl.rc_rack_tmit_time == 0)
9103 rack->r_ctl.rc_rack_tmit_time = 1;
9104 rack->rc_rack_rtt = t;
9106 return (1);
9109 return (0);
9113 * Mark the SACK_PASSED flag on all entries prior to rsm send wise.
9115 static void
9116 rack_log_sack_passed(struct tcpcb *tp,
9117 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts)
9119 struct rack_sendmap *nrsm;
9120 uint32_t thresh;
9122 /* Get our rxt threshold for lost consideration */
9123 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0);
9124 /* Now start looking at rsm's */
9125 nrsm = rsm;
9126 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
9127 rack_head, r_tnext) {
9128 if (nrsm == rsm) {
9129 /* Skip the original segment, it is acked */
9130 continue;
9132 if (nrsm->r_flags & RACK_ACKED) {
9134 * Skip ack'd segments, though we
9135 * should not see these, since tmap
9136 * should not have ack'd segments.
9138 continue;
9140 if (nrsm->r_flags & RACK_RWND_COLLAPSED) {
9142 * If the peer dropped the rwnd on
9143 * these then we don't worry about them.
9145 continue;
9147 /* Check lost state */
9148 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) {
9149 uint32_t exp;
9151 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh;
9152 if (TSTMP_LT(exp, cts) || (exp == cts)) {
9153 /* We consider it lost */
9154 nrsm->r_flags |= RACK_WAS_LOST;
9155 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start;
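/*
 * Illustrative numbers for the check above: if the original send
 * time was 1,000,000 usec and thresh = 45,000 usec, then exp =
 * 1,045,000; any cts at or past that marks the segment
 * RACK_WAS_LOST and adds its length to rc_considered_lost.
 */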
9158 if (nrsm->r_flags & RACK_SACK_PASSED) {
9160 * We found one that is already marked
9161 * passed; we have been here before, and
9162 * so all others below this are marked.
9164 break;
9166 nrsm->r_flags |= RACK_SACK_PASSED;
9167 nrsm->r_flags &= ~RACK_WAS_SACKPASS;
9171 static void
9172 rack_need_set_test(struct tcpcb *tp,
9173 struct tcp_rack *rack,
9174 struct rack_sendmap *rsm,
9175 tcp_seq th_ack,
9176 int line,
9177 int use_which)
9179 struct rack_sendmap *s_rsm;
9181 if ((tp->t_flags & TF_GPUTINPROG) &&
9182 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
9184 * We were app limited, and this ack
9185 * butts up or goes beyond the point where we want
9186 * to start our next measurement. We need
9187 * to record the new gput_ts as here and
9188 * possibly update the start sequence.
9190 uint32_t seq, ts;
9192 if (rsm->r_rtr_cnt > 1) {
9194 * This is a retransmit, can we
9195 * really make any assessment at this
9196 * point? We are not really sure of
9197 * the timestamp, is it this or the
9198 * previous transmission?
9200 * Let's wait for something better that
9201 * is not retransmitted.
9203 return;
9205 seq = tp->gput_seq;
9206 ts = tp->gput_ts;
9207 rack->app_limited_needs_set = 0;
9208 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
9209 /* Do we start at a new end? */
9210 if ((use_which == RACK_USE_BEG) &&
9211 SEQ_GEQ(rsm->r_start, tp->gput_seq)) {
9213 * When we get an ACK that just eats
9214 * up some of the rsm, we set RACK_USE_BEG
9215 * since what's at r_start (i.e. th_ack)
9216 * is left unacked and that's where the
9217 * measurement now starts.
9219 tp->gput_seq = rsm->r_start;
9221 if ((use_which == RACK_USE_END) &&
9222 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
9224 * We use the end when the cumack
9225 * is moving forward and completely
9226 * deleting the rsm passed so basically
9227 * r_end holds th_ack.
9229 * For SACK's we also want to use the end
9230 * since this piece just got sacked and
9231 * we want to target anything after that
9232 * in our measurement.
9234 tp->gput_seq = rsm->r_end;
9236 if (use_which == RACK_USE_END_OR_THACK) {
9238 * special case for ack moving forward,
9239 * not a sack, we need to move all the
9240 * way up to where this ack cum-ack moves
9241 * to.
9243 if (SEQ_GT(th_ack, rsm->r_end))
9244 tp->gput_seq = th_ack;
9245 else
9246 tp->gput_seq = rsm->r_end;
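/*
 * Restating the three cases above: RACK_USE_BEG moves gput_seq up to
 * rsm->r_start and RACK_USE_END up to rsm->r_end (each only when
 * that is at or beyond the current gput_seq), while
 * RACK_USE_END_OR_THACK picks whichever of th_ack or rsm->r_end is
 * further along in sequence space.
 */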
9248 if (SEQ_LT(tp->gput_seq, tp->snd_max))
9249 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
9250 else
9251 s_rsm = NULL;
9253 * Pick up the correct send time if we can; the rsm passed in
9254 * may be equal to s_rsm if RACK_USE_BEG was set. For the other
9255 * two cases (RACK_USE_THACK or RACK_USE_END) most likely we will
9256 * find a different seq, i.e. the next send up.
9258 * If that has not been sent, s_rsm will be NULL and we must
9259 * arrange it so this function will get called again by setting
9260 * app_limited_needs_set.
9262 if (s_rsm)
9263 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0];
9264 else {
9265 /* If we hit here we have to have *not* sent tp->gput_seq */
9266 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0];
9267 /* Set it up so we will go through here again */
9268 rack->app_limited_needs_set = 1;
9270 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) {
9272 * We moved beyond this guy's range, re-calculate
9273 * the new end point.
9275 if (rack->rc_gp_filled == 0) {
9276 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
9277 } else {
9278 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
9282 * We are moving the goal post, we may be able to clear the
9283 * measure_saw_probe_rtt flag.
9285 if ((rack->in_probe_rtt == 0) &&
9286 (rack->measure_saw_probe_rtt) &&
9287 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
9288 rack->measure_saw_probe_rtt = 0;
9289 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts,
9290 seq, tp->gput_seq,
9291 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) |
9292 (uint64_t)rack->r_ctl.rc_gp_output_ts),
9293 5, line, NULL, 0);
9294 if (rack->rc_gp_filled &&
9295 ((tp->gput_ack - tp->gput_seq) <
9296 max(rc_init_window(rack), (MIN_GP_WIN *
9297 ctf_fixed_maxseg(tp))))) {
9298 uint32_t ideal_amount;
9300 ideal_amount = rack_get_measure_window(tp, rack);
9301 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) {
9303 * There is no sense in continuing this measurement
9304 * because it's too small to gain us anything we
9305 * trust. Skip it, and that way we can start a new
9306 * measurement quicker.
9308 tp->t_flags &= ~TF_GPUTINPROG;
9309 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
9310 0, 0,
9311 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) |
9312 (uint64_t)rack->r_ctl.rc_gp_output_ts),
9313 6, __LINE__, NULL, 0);
9314 } else {
9316 * Reset the window further out.
9318 tp->gput_ack = tp->gput_seq + ideal_amount;
9321 rack_tend_gp_marks(tp, rack);
9322 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm);
9326 static inline int
9327 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm)
9329 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) {
9330 /* Behind our TLP definition or right at */
9331 return (0);
9333 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) {
9334 /* The start is beyond or right at our end of TLP definition */
9335 return (0);
9337 /* It has to be a sub-part of the original TLP recorded */
9338 return (1);
9341 static uint32_t
9342 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack,
9343 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts,
9344 uint32_t segsiz)
9346 uint32_t start, end, changed = 0;
9347 struct rack_sendmap stack_map;
9348 struct rack_sendmap *rsm, *nrsm, *prev, *next;
9349 int insret __diagused;
9350 int32_t used_ref = 1;
9351 int can_use_hookery = 0;
9353 start = sack->start;
9354 end = sack->end;
9355 rsm = *prsm;
9357 do_rest_ofb:
9358 if ((rsm == NULL) ||
9359 (SEQ_LT(end, rsm->r_start)) ||
9360 (SEQ_GEQ(start, rsm->r_end)) ||
9361 (SEQ_LT(start, rsm->r_start))) {
9363 * We are not in the right spot,
9364 * find the correct spot in the tree.
9366 used_ref = 0;
9367 rsm = tqhash_find(rack->r_ctl.tqh, start);
9369 if (rsm == NULL) {
9370 /* TSNH */
9371 goto out;
9373 /* Ok we have an ACK for some piece of this rsm */
9374 if (rsm->r_start != start) {
9375 if ((rsm->r_flags & RACK_ACKED) == 0) {
9377 * Before any splitting or hookery is
9378 * done, is it a TLP of interest, i.e. a rxt?
9380 if ((rsm->r_flags & RACK_TLP) &&
9381 (rsm->r_rtr_cnt > 1)) {
9383 * We are splitting a rxt TLP, check
9384 * if we need to save off the start/end
9386 if (rack->rc_last_tlp_acked_set &&
9387 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
9389 * We already turned this on since we are inside it;
9390 * the previous one was a partial sack, now we
9391 * are getting another one (maybe all of it).
9394 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
9396 * Let's make sure we have all of it though.
9398 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
9399 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9400 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9401 rack->r_ctl.last_tlp_acked_end);
9403 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
9404 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9405 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9406 rack->r_ctl.last_tlp_acked_end);
9408 } else {
9409 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9410 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9411 rack->rc_last_tlp_past_cumack = 0;
9412 rack->rc_last_tlp_acked_set = 1;
9413 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
9417 * Need to split this into two pieces, the before and after;
9418 * the before remains in the map, the after must be
9419 * added. In other words we have:
9420 * rsm |--------------|
9421 * sackblk |------->
9422 * rsm will become
9423 * rsm |---|
9424 * and nrsm will be the sacked piece
9425 * nrsm |----------|
9427 * But before we start down that path, let's
9428 * see if the sack spans over on top of
9429 * the next guy and it is already sacked.
9433 * Hookery can only be used if the two entries
9434 * are in the same bucket and neither one of
9435 * them straddles the bucket line.
9437 next = tqhash_next(rack->r_ctl.tqh, rsm);
9438 if (next &&
9439 (rsm->bindex == next->bindex) &&
9440 ((rsm->r_flags & RACK_STRADDLE) == 0) &&
9441 ((next->r_flags & RACK_STRADDLE) == 0) &&
9442 ((rsm->r_flags & RACK_IS_PCM) == 0) &&
9443 ((next->r_flags & RACK_IS_PCM) == 0) &&
9444 (rsm->r_flags & RACK_IN_GP_WIN) &&
9445 (next->r_flags & RACK_IN_GP_WIN))
9446 can_use_hookery = 1;
9447 else
9448 can_use_hookery = 0;
9449 if (next && can_use_hookery &&
9450 (next->r_flags & RACK_ACKED) &&
9451 SEQ_GEQ(end, next->r_start)) {
9453 * So the next one is already acked, and
9454 * we can thus by hookery use our stack_map
9455 * to reflect the piece being sacked and
9456 * then adjust the two tree entries moving
9457 * the start and ends around. So we start like:
9458 * rsm |------------| (not-acked)
9459 * next |-----------| (acked)
9460 * sackblk |-------->
9461 * We want to end like so:
9462 * rsm |------| (not-acked)
9463 * next |-----------------| (acked)
9464 * nrsm |-----|
9465 * Where nrsm is a temporary stack piece we
9466 * use to update all the gizmos.
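/*
 * Hypothetical numbers for the shuffle below: rsm = [100, 200)
 * (not acked), next = [200, 300) (acked), sackblk = [150, 260).
 * After the adjustment rsm ends at 150, next starts at 150, and the
 * stack copy nrsm covers the newly sacked [150, 200) piece that is
 * used for the rtt/sacked accounting.
 */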
9468 /* Copy up our fudge block */
9469 nrsm = &stack_map;
9470 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
9471 /* Now adjust our tree blocks */
9472 tqhash_update_end(rack->r_ctl.tqh, rsm, start);
9473 next->r_start = start;
9474 rsm->r_flags |= RACK_SHUFFLED;
9475 next->r_flags |= RACK_SHUFFLED;
9476 /* Now we must adjust back where next->m is */
9477 rack_setup_offset_for_rsm(rack, rsm, next);
9479 * Which timestamp do we keep? It is rather
9480 * important in GP measurements to have the
9481 * accurate end of the send window.
9483 * We keep the largest value, which is the newest
9484 * send. We do this in case a segment that is
9485 * joined together and not part of a GP estimate
9486 * later gets expanded into the GP estimate.
9488 * We prohibit the merging of unlike kinds i.e.
9489 * all pieces that are in the GP estimate can be
9490 * merged and all pieces that are not in a GP estimate
9491 * can be merged, but not dissimilar pieces. Combine
9492 * this with taking the highest here and we should
9493 * be ok unless of course the client reneges. Then
9494 * all bets are off.
9496 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] <
9497 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)])
9498 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)];
9500 * And we must keep the newest ack arrival time.
9502 if (next->r_ack_arrival <
9503 rack_to_usec_ts(&rack->r_ctl.act_rcv_time))
9504 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
9507 /* We don't need to adjust rsm, it did not change */
9508 /* Clear out the dup ack count of the remainder */
9509 rsm->r_dupack = 0;
9510 rsm->r_just_ret = 0;
9511 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
9512 /* Now let's make sure our fudge block is right */
9513 nrsm->r_start = start;
9514 /* Now let's update all the stats and such */
9515 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
9516 if (rack->app_limited_needs_set)
9517 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
9518 changed += (nrsm->r_end - nrsm->r_start);
9519 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
9520 if (rsm->r_flags & RACK_WAS_LOST) {
9521 int my_chg;
9523 my_chg = (nrsm->r_end - nrsm->r_start);
9524 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
9525 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
9526 if (my_chg <= rack->r_ctl.rc_considered_lost)
9527 rack->r_ctl.rc_considered_lost -= my_chg;
9528 else
9529 rack->r_ctl.rc_considered_lost = 0;
9531 if (nrsm->r_flags & RACK_SACK_PASSED) {
9532 rack->r_ctl.rc_reorder_ts = cts;
9533 if (rack->r_ctl.rc_reorder_ts == 0)
9534 rack->r_ctl.rc_reorder_ts = 1;
9537 * Now we want to go up from rsm (the
9538 * one left un-acked) to the next one
9539 * in the tmap. We do this so when
9540 * we walk backwards we include marking
9541 * sack-passed on rsm (The one passed in
9542 * is skipped since it is generally called
9543 * on something sacked before removing it
9544 * from the tmap).
9546 if (rsm->r_in_tmap) {
9547 nrsm = TAILQ_NEXT(rsm, r_tnext);
9549 * Now that we have the next
9550 * one walk backwards from there.
9552 if (nrsm && nrsm->r_in_tmap)
9553 rack_log_sack_passed(tp, rack, nrsm, cts);
9555 /* Now are we done? */
9556 if (SEQ_LT(end, next->r_end) ||
9557 (end == next->r_end)) {
9558 /* Done with block */
9559 goto out;
9561 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__);
9562 counter_u64_add(rack_sack_used_next_merge, 1);
9563 /* Position for the next block */
9564 start = next->r_end;
9565 rsm = tqhash_next(rack->r_ctl.tqh, next);
9566 if (rsm == NULL)
9567 goto out;
9568 } else {
9570 * We can't use any hookery here, so we
9571 * need to split the map. We enter like
9572 * so:
9573 * rsm |--------|
9574 * sackblk |----->
9575 * We will add the new block nrsm and
9576 * that will be the new portion, and then
9577 * fall through after resetting rsm. So we
9578 * split and look like this:
9579 * rsm |----|
9580 * sackblk |----->
9581 * nrsm |---|
9582 * We then fall through resetting
9583 * rsm to nrsm, so the next block
9584 * picks it up.
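/*
 * Hypothetical numbers: rsm = [100, 300) and the sack starts at 180;
 * rack_clone_rsm() leaves rsm = [100, 180), creates nrsm =
 * [180, 300), and rsm is then repositioned to nrsm so the normal
 * processing below handles the sacked portion.
 */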
9586 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
9587 if (nrsm == NULL) {
9589 * failed XXXrrs what can we do but lose the sack
9590 * info?
9592 goto out;
9594 counter_u64_add(rack_sack_splits, 1);
9595 rack_clone_rsm(rack, nrsm, rsm, start);
9596 rsm->r_just_ret = 0;
9597 #ifndef INVARIANTS
9598 (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
9599 #else
9600 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
9601 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
9602 nrsm, insret, rack, rsm);
9604 #endif
9605 if (rsm->r_in_tmap) {
9606 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
9607 nrsm->r_in_tmap = 1;
9609 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__);
9610 rsm->r_flags &= (~RACK_HAS_FIN);
9611 /* Position us to point to the new nrsm that starts the sack blk */
9612 rsm = nrsm;
9614 } else {
9615 /* Already sacked this piece */
9616 counter_u64_add(rack_sack_skipped_acked, 1);
9617 if (end == rsm->r_end) {
9618 /* Done with block */
9619 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
9620 goto out;
9621 } else if (SEQ_LT(end, rsm->r_end)) {
9622 /* A partial sack to an already sacked block */
9623 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
9624 goto out;
9625 } else {
9627 * The end goes beyond this guy;
9628 * reposition the start to the
9629 * next block.
9631 start = rsm->r_end;
9632 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
9633 if (rsm == NULL)
9634 goto out;
9638 if (SEQ_GEQ(end, rsm->r_end)) {
9640 * The end of this block is either beyond this guy or right
9641 * at this guy. I.e.:
9642 * rsm --- |-----|
9643 * end |-----|
9644 * <or>
9645 * end |---------|
9647 if ((rsm->r_flags & RACK_ACKED) == 0) {
9649 * Is it a TLP of interest?
9651 if ((rsm->r_flags & RACK_TLP) &&
9652 (rsm->r_rtr_cnt > 1)) {
9654 * We are splitting a rxt TLP, check
9655 * if we need to save off the start/end
9657 if (rack->rc_last_tlp_acked_set &&
9658 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
9660 * We already turned this on since we are inside it;
9661 * the previous one was a partial sack, now we
9662 * are getting another one (maybe all of it).
9664 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
9666 * Let's make sure we have all of it though.
9668 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
9669 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9670 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9671 rack->r_ctl.last_tlp_acked_end);
9673 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
9674 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9675 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9676 rack->r_ctl.last_tlp_acked_end);
9678 } else {
9679 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9680 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9681 rack->rc_last_tlp_past_cumack = 0;
9682 rack->rc_last_tlp_acked_set = 1;
9683 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
9686 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
9687 changed += (rsm->r_end - rsm->r_start);
9688 /* You get a count for acking a whole segment or more */
9689 if (rsm->r_flags & RACK_WAS_LOST) {
9690 int my_chg;
9692 my_chg = (rsm->r_end - rsm->r_start);
9693 rsm->r_flags &= ~RACK_WAS_LOST;
9694 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
9695 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
9696 if (my_chg <= rack->r_ctl.rc_considered_lost)
9697 rack->r_ctl.rc_considered_lost -= my_chg;
9698 else
9699 rack->r_ctl.rc_considered_lost = 0;
9701 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
9702 if (rsm->r_in_tmap) /* should be true */
9703 rack_log_sack_passed(tp, rack, rsm, cts);
9704 /* Is reordering occurring? */
9705 if (rsm->r_flags & RACK_SACK_PASSED) {
9706 rsm->r_flags &= ~RACK_SACK_PASSED;
9707 rack->r_ctl.rc_reorder_ts = cts;
9708 if (rack->r_ctl.rc_reorder_ts == 0)
9709 rack->r_ctl.rc_reorder_ts = 1;
9711 if (rack->app_limited_needs_set)
9712 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
9713 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
9714 rsm->r_flags |= RACK_ACKED;
9715 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end);
9716 if (rsm->r_in_tmap) {
9717 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
9718 rsm->r_in_tmap = 0;
9720 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__);
9721 } else {
9722 counter_u64_add(rack_sack_skipped_acked, 1);
9724 if (end == rsm->r_end) {
9725 /* This block only - done, setup for next */
9726 goto out;
9729 * There is more not covered by this rsm; move on
9730 * to the next block in the tail queue hash table.
9732 nrsm = tqhash_next(rack->r_ctl.tqh, rsm);
9733 start = rsm->r_end;
9734 rsm = nrsm;
9735 if (rsm == NULL)
9736 goto out;
9737 goto do_rest_ofb;
9740 * The end of this sack block is smaller than
9741 * our rsm i.e.:
9742 * rsm --- |-----|
9743 * end |--|
9745 if ((rsm->r_flags & RACK_ACKED) == 0) {
9747 * Is it a TLP of interest?
9749 if ((rsm->r_flags & RACK_TLP) &&
9750 (rsm->r_rtr_cnt > 1)) {
9752 * We are splitting a rxt TLP, check
9753 * if we need to save off the start/end
9755 if (rack->rc_last_tlp_acked_set &&
9756 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
9758 * We already turned this on since we are inside it;
9759 * the previous one was a partial sack, now we
9760 * are getting another one (maybe all of it).
9762 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
9764 * Let's make sure we have all of it though.
9766 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
9767 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9768 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9769 rack->r_ctl.last_tlp_acked_end);
9771 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
9772 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9773 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9774 rack->r_ctl.last_tlp_acked_end);
9776 } else {
9777 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9778 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9779 rack->rc_last_tlp_past_cumack = 0;
9780 rack->rc_last_tlp_acked_set = 1;
9781 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
9785 * Hookery can only be used if the two entries
9786 * are in the same bucket and neither one of
9787 * them straddles the bucket line.
9789 prev = tqhash_prev(rack->r_ctl.tqh, rsm);
9790 if (prev &&
9791 (rsm->bindex == prev->bindex) &&
9792 ((rsm->r_flags & RACK_STRADDLE) == 0) &&
9793 ((prev->r_flags & RACK_STRADDLE) == 0) &&
9794 ((rsm->r_flags & RACK_IS_PCM) == 0) &&
9795 ((prev->r_flags & RACK_IS_PCM) == 0) &&
9796 (rsm->r_flags & RACK_IN_GP_WIN) &&
9797 (prev->r_flags & RACK_IN_GP_WIN))
9798 can_use_hookery = 1;
9799 else
9800 can_use_hookery = 0;
9801 if (prev && can_use_hookery &&
9802 (prev->r_flags & RACK_ACKED)) {
9804 * Goal, we want the right remainder of rsm to shrink
9805 * in place and span from (rsm->r_start = end) to rsm->r_end.
9806 * We want to expand prev to go all the way
9807 * to prev->r_end <- end.
9808 * so in the tree we have before:
9809 * prev |--------| (acked)
9810 * rsm |-------| (non-acked)
9811 * sackblk |-|
9812 * We churn it so we end up with
9813 * prev |----------| (acked)
9814 * rsm |-----| (non-acked)
9815 * nrsm |-| (temporary)
9817 * Note if either prev/rsm is a TLP we don't
9818 * do this.
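/*
 * Hypothetical numbers for this shuffle: prev = [100, 200) (acked),
 * rsm = [200, 300) (not acked), sackblk = [200, 240).  prev is
 * extended to end at 240, rsm shrinks to [240, 300), and the stack
 * copy nrsm covers the sacked [200, 240) piece for the accounting.
 */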
9820 nrsm = &stack_map;
9821 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
9822 tqhash_update_end(rack->r_ctl.tqh, prev, end);
9823 rsm->r_start = end;
9824 rsm->r_flags |= RACK_SHUFFLED;
9825 prev->r_flags |= RACK_SHUFFLED;
9826 /* Now adjust nrsm (stack copy) to be
9827 * the one that is the small
9828 * piece that was "sacked".
9830 nrsm->r_end = end;
9831 rsm->r_dupack = 0;
9833 * Which timestamp do we keep? It is rather
9834 * important in GP measurements to have the
9835 * accurate end of the send window.
9837 * We keep the largest value, which is the newest
9838 * send. We do this in case a segment that is
9839 * joined together and not part of a GP estimate
9840 * later gets expanded into the GP estimate.
9842 * We prohibit the merging of unlike kinds i.e.
9843 * all pieces that are in the GP estimate can be
9844 * merged and all pieces that are not in a GP estimate
9845 * can be merged, but not dissimilar pieces. Combine
9846 * this with taking the highest here and we should
9847 * be ok unless of course the client reneges. Then
9848 * all bets are off.
9850 if(prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] <
9851 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) {
9852 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
9855 * And we must keep the newest ack arrival time.
9858 if(prev->r_ack_arrival <
9859 rack_to_usec_ts(&rack->r_ctl.act_rcv_time))
9860 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
9862 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
9864 * Now that the rsm has had its start moved forward
9865 * let's go ahead and get its new place in the world.
9867 rack_setup_offset_for_rsm(rack, prev, rsm);
9869 * Now nrsm is our new little piece
9870 * that is acked (which was merged
9871 * to prev). Update the rtt and changed
9872 * based on that. Also check for reordering.
9874 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
9875 if (rack->app_limited_needs_set)
9876 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
9877 changed += (nrsm->r_end - nrsm->r_start);
9878 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
9879 if (rsm->r_flags & RACK_WAS_LOST) {
9880 int my_chg;
9882 my_chg = (nrsm->r_end - nrsm->r_start);
9883 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
9884 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
9885 if (my_chg <= rack->r_ctl.rc_considered_lost)
9886 rack->r_ctl.rc_considered_lost -= my_chg;
9887 else
9888 rack->r_ctl.rc_considered_lost = 0;
9890 if (nrsm->r_flags & RACK_SACK_PASSED) {
9891 rack->r_ctl.rc_reorder_ts = cts;
9892 if (rack->r_ctl.rc_reorder_ts == 0)
9893 rack->r_ctl.rc_reorder_ts = 1;
9895 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__);
9896 rsm = prev;
9897 counter_u64_add(rack_sack_used_prev_merge, 1);
9898 } else {
9900 * This is the case where our previous
9901 * block is not acked either, so we must
9902 * split the block in two.
9904 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
9905 if (nrsm == NULL) {
9906 /* failed rrs, what can we do but lose the sack info? */
9907 goto out;
9909 if ((rsm->r_flags & RACK_TLP) &&
9910 (rsm->r_rtr_cnt > 1)) {
9912 * We are splitting a rxt TLP, check
9913 * if we need to save off the start/end
9915 if (rack->rc_last_tlp_acked_set &&
9916 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
9918 * We already turned this on since this block is inside
9919 * the previous one; it was partially sacked, now we
9920 * are getting another one (maybe all of it).
9922 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
9924 * Let's make sure we have all of it though.
9926 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
9927 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9928 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9929 rack->r_ctl.last_tlp_acked_end);
9931 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
9932 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9933 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9934 rack->r_ctl.last_tlp_acked_end);
9936 } else {
9937 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9938 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9939 rack->rc_last_tlp_acked_set = 1;
9940 rack->rc_last_tlp_past_cumack = 0;
9941 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
9945 * In this case nrsm becomes
9946 * nrsm->r_start = end;
9947 * nrsm->r_end = rsm->r_end;
9948 * which is un-acked.
9949 * <and>
9950 * rsm->r_end = nrsm->r_start;
9951 * i.e. the remaining un-acked
9952 * piece is left on the left
9953 * hand side.
9955 * So we start like this
9956 * rsm |----------| (not acked)
9957 * sackblk |---|
9958 * build it so we have
9959 * rsm |---| (acked)
9960 * nrsm |------| (not acked)
9962 counter_u64_add(rack_sack_splits, 1);
9963 rack_clone_rsm(rack, nrsm, rsm, end);
9964 rsm->r_flags &= (~RACK_HAS_FIN);
9965 rsm->r_just_ret = 0;
9966 #ifndef INVARIANTS
9967 (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
9968 #else
9969 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
9970 panic("Insert in tailq_hash of %p fails ret:% rack:%p rsm:%p",
9971 nrsm, insret, rack, rsm);
9973 #endif
9974 if (rsm->r_in_tmap) {
9975 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
9976 nrsm->r_in_tmap = 1;
9978 nrsm->r_dupack = 0;
9979 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
9980 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
9981 changed += (rsm->r_end - rsm->r_start);
9982 if (rsm->r_flags & RACK_WAS_LOST) {
9983 int my_chg;
9985 my_chg = (rsm->r_end - rsm->r_start);
9986 rsm->r_flags &= ~RACK_WAS_LOST;
9987 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
9988 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
9989 if (my_chg <= rack->r_ctl.rc_considered_lost)
9990 rack->r_ctl.rc_considered_lost -= my_chg;
9991 else
9992 rack->r_ctl.rc_considered_lost = 0;
9994 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
9996 if (rsm->r_in_tmap) /* should be true */
9997 rack_log_sack_passed(tp, rack, rsm, cts);
9998 /* Is reordering occurring? */
9999 if (rsm->r_flags & RACK_SACK_PASSED) {
10000 rsm->r_flags &= ~RACK_SACK_PASSED;
10001 rack->r_ctl.rc_reorder_ts = cts;
10002 if (rack->r_ctl.rc_reorder_ts == 0)
10003 rack->r_ctl.rc_reorder_ts = 1;
10005 if (rack->app_limited_needs_set)
10006 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
10007 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
10008 rsm->r_flags |= RACK_ACKED;
10009 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end);
10010 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__);
10011 if (rsm->r_in_tmap) {
10012 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
10013 rsm->r_in_tmap = 0;
10016 } else if (start != end) {
10018 * The block was already acked.
10020 counter_u64_add(rack_sack_skipped_acked, 1);
10022 out:
10023 if (rsm &&
10024 ((rsm->r_flags & RACK_TLP) == 0) &&
10025 (rsm->r_flags & RACK_ACKED)) {
10027 * Now can we merge where we worked
10028 * with either the previous or
10029 * next block?
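/*
 * Roughly, a merge is only attempted between like kinds: both
 * entries acked, neither a TLP, both in the same tqhash bucket,
 * neither straddling a bucket boundary or flagged RACK_IS_PCM,
 * and both on the same side of the GP measurement window, as
 * the checks in the two loops below spell out.
 */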
10031 next = tqhash_next(rack->r_ctl.tqh, rsm);
10032 while (next) {
10033 if (next->r_flags & RACK_TLP)
10034 break;
10035 /* Only allow merges between ones in or out of GP window */
10036 if ((next->r_flags & RACK_IN_GP_WIN) &&
10037 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) {
10038 break;
10040 if ((rsm->r_flags & RACK_IN_GP_WIN) &&
10041 ((next->r_flags & RACK_IN_GP_WIN) == 0)) {
10042 break;
10044 if (rsm->bindex != next->bindex)
10045 break;
10046 if (rsm->r_flags & RACK_STRADDLE)
10047 break;
10048 if (rsm->r_flags & RACK_IS_PCM)
10049 break;
10050 if (next->r_flags & RACK_STRADDLE)
10051 break;
10052 if (next->r_flags & RACK_IS_PCM)
10053 break;
10054 if (next->r_flags & RACK_ACKED) {
10055 /* yep this and next can be merged */
10056 rsm = rack_merge_rsm(rack, rsm, next);
10057 next = tqhash_next(rack->r_ctl.tqh, rsm);
10058 } else
10059 break;
10061 /* Now what about the previous? */
10062 prev = tqhash_prev(rack->r_ctl.tqh, rsm);
10063 while (prev) {
10064 if (prev->r_flags & RACK_TLP)
10065 break;
10066 /* Only allow merges between ones in or out of GP window */
10067 if ((prev->r_flags & RACK_IN_GP_WIN) &&
10068 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) {
10069 break;
10071 if ((rsm->r_flags & RACK_IN_GP_WIN) &&
10072 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) {
10073 break;
10075 if (rsm->bindex != prev->bindex)
10076 break;
10077 if (rsm->r_flags & RACK_STRADDLE)
10078 break;
10079 if (rsm->r_flags & RACK_IS_PCM)
10080 break;
10081 if (prev->r_flags & RACK_STRADDLE)
10082 break;
10083 if (prev->r_flags & RACK_IS_PCM)
10084 break;
10085 if (prev->r_flags & RACK_ACKED) {
10086 /* yep the previous and this can be merged */
10087 rsm = rack_merge_rsm(rack, prev, rsm);
10088 prev = tqhash_prev(rack->r_ctl.tqh, rsm);
10089 } else
10090 break;
10093 if (used_ref == 0) {
10094 counter_u64_add(rack_sack_proc_all, 1);
10095 } else {
10096 counter_u64_add(rack_sack_proc_short, 1);
10098 /* Save off the next one for quick reference. */
10099 nrsm = tqhash_find(rack->r_ctl.tqh, end);
10100 *prsm = rack->r_ctl.rc_sacklast = nrsm;
10101 return (changed);
10104 static void inline
10105 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
10107 struct rack_sendmap *tmap;
10109 tmap = NULL;
10110 while (rsm && (rsm->r_flags & RACK_ACKED)) {
10111 /* Its no longer sacked, mark it so */
10112 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
10113 #ifdef INVARIANTS
10114 if (rsm->r_in_tmap) {
10115 panic("rack:%p rsm:%p flags:0x%x in tmap?",
10116 rack, rsm, rsm->r_flags);
10118 #endif
10119 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
10120 /* Rebuild it into our tmap */
10121 if (tmap == NULL) {
10122 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
10123 tmap = rsm;
10124 } else {
10125 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
10126 tmap = rsm;
10128 tmap->r_in_tmap = 1;
10129 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
10132 * Now let's possibly clear the sack filter so we start
10133 * recognizing sacks that cover this area.
10135 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack);
10140 static void inline
10141 rack_rsm_sender_update(struct tcp_rack *rack, struct tcpcb *tp, struct rack_sendmap *rsm, uint8_t from)
10144 * We look at advancing the end send time for our GP
10145 * measurement tracking only as the cumulative acknowledgment
10146 * moves forward. You might wonder about this, why not
10147 * at every transmission or retransmission within the
10148 * GP window update the rc_gp_cumack_ts? Well, it's rather
10149 * nuanced but basically the GP window *may* expand (as
10150 * it does below) or worse and harder to track it may shrink.
10152 * This last makes it impossible to track at the time of
10153 * the send, since you may set forward your rc_gp_cumack_ts
10154 * when you send, because that send *is* in your currently
10155 * "guessed" window, but then it shrinks. Now which was
10156 * the send time of the last bytes in the window, by the
10157 * time you ask that question that part of the sendmap
10158 * is freed. So you don't know and you will have too
10159 * long of a send window. Instead, by updating the time
10160 * marker only when the cumack advances this assures us
10161 * that we will have only the sends in the window of our
10162 * GP measurement.
10164 * Another complication from this is the
10165 * merging of sendmap entries. During SACK processing this
10166 * can happen to conserve the sendmap size. That breaks
10167 * everything down in tracking the send window of the GP
10168 * estimate. So to prevent that and keep it working with
10169 * a tiny bit more limited merging, we only allow like
10170 * types to be merged. I.e. if two sends are in the GP window
10171 * then its ok to merge them together. If two sends are not
10172 * in the GP window its ok to merge them together too. Though
10173 * one send in and one send out cannot be merged. We combine
10174 * this with never allowing the shrinking of the GP window when
10175 * we are in recovery so that we can properly calculate the
10176 * sending times.
10178 * This all of course seems complicated, because it is.. :)
10180 * The cum-ack is being advanced upon the sendmap.
10181 * If we are not doing a GP estimate don't
10182 * proceed.
10184 uint64_t ts;
10186 if ((tp->t_flags & TF_GPUTINPROG) == 0)
10187 return;
10189 * If this sendmap entry is going
10190 * beyond the measurement window we had picked,
10191 * expand the measurement window by that much.
10193 if (SEQ_GT(rsm->r_end, tp->gput_ack)) {
10194 tp->gput_ack = rsm->r_end;
10197 * If we have not setup a ack, then we
10198 * have no idea if the newly acked pieces
10199 * will be "in our seq measurement range". If
10200 * it is when we clear the app_limited_needs_set
10201 * flag the timestamp will be updated.
10203 if (rack->app_limited_needs_set)
10204 return;
10206 * Finally, we grab out the latest timestamp
10207 * that this packet was sent and then see
10208 * if:
10209 * a) The packet touches our newly defined GP range.
10210 * b) The time is greater than (newer) than the
10211 * one we currently have. If so we update
10212 * our sending end time window.
10214 * Note we *do not* do this at send time. The reason
10215 * is that if you do you *may* pick up a newer timestamp
10216 * for a range you are not going to measure. We project
10217 * out how far and then sometimes modify that to be
10218 * smaller. If that occurs then you will have a send
10219 * that does not belong to the range included.
10221 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <=
10222 rack->r_ctl.rc_gp_cumack_ts)
10223 return;
10224 if (rack_in_gp_window(tp, rsm)) {
10225 rack->r_ctl.rc_gp_cumack_ts = ts;
10226 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end,
10227 __LINE__, from, rsm);
10231 static void
10232 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to, uint64_t acktime)
10234 struct rack_sendmap *rsm;
10236 * The ACK point is advancing to th_ack, we must drop off
10237 * the packets in the rack log and calculate any eligible
10238 * RTT's.
10241 if (sack_filter_blks_used(&rack->r_ctl.rack_sf)) {
10243 * If we have some sack blocks in the filter
10244 * lets prune them out by calling sfb with no blocks.
10246 sack_filter_blks(tp, &rack->r_ctl.rack_sf, NULL, 0, th_ack);
10248 if (SEQ_GT(th_ack, tp->snd_una)) {
10249 /* Clear any app ack remembered settings */
10250 rack->r_ctl.cleared_app_ack = 0;
10252 rack->r_wanted_output = 1;
10253 if (SEQ_GT(th_ack, tp->snd_una))
10254 rack->r_ctl.last_cumack_advance = acktime;
10256 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */
10257 if ((rack->rc_last_tlp_acked_set == 1) &&
10258 (rack->rc_last_tlp_past_cumack == 1) &&
10259 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) {
10261 * We have reached the point where our last rack
10262 * tlp retransmit sequence is ahead of the cum-ack.
10263 * This can only happen when the cum-ack moves all
10264 * the way around (it's been a full 2^31+1 bytes
10265 * or more since we sent a retransmitted TLP). Let's
10266 * turn off the valid flag since its not really valid.
10268 * Note since sack's also turn on this event we have
10269 * a complication, we have to wait to age it out until
10270 * the cum-ack is by the TLP before checking which is
10271 * what the next else clause does.
10273 rack_log_dsack_event(rack, 9, __LINE__,
10274 rack->r_ctl.last_tlp_acked_start,
10275 rack->r_ctl.last_tlp_acked_end);
10276 rack->rc_last_tlp_acked_set = 0;
10277 rack->rc_last_tlp_past_cumack = 0;
10278 } else if ((rack->rc_last_tlp_acked_set == 1) &&
10279 (rack->rc_last_tlp_past_cumack == 0) &&
10280 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) {
10282 * It is safe to start aging TLP's out.
10284 rack->rc_last_tlp_past_cumack = 1;
10286 /* We do the same for the tlp send seq as well */
10287 if ((rack->rc_last_sent_tlp_seq_valid == 1) &&
10288 (rack->rc_last_sent_tlp_past_cumack == 1) &&
10289 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) {
10290 rack_log_dsack_event(rack, 9, __LINE__,
10291 rack->r_ctl.last_sent_tlp_seq,
10292 (rack->r_ctl.last_sent_tlp_seq +
10293 rack->r_ctl.last_sent_tlp_len));
10294 rack->rc_last_sent_tlp_seq_valid = 0;
10295 rack->rc_last_sent_tlp_past_cumack = 0;
10296 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) &&
10297 (rack->rc_last_sent_tlp_past_cumack == 0) &&
10298 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) {
10300 * It is safe to start aging TLP's send.
10302 rack->rc_last_sent_tlp_past_cumack = 1;
10304 more:
10305 rsm = tqhash_min(rack->r_ctl.tqh);
10306 if (rsm == NULL) {
10307 if ((th_ack - 1) == tp->iss) {
10309 * For the SYN incoming case we will not
10310 * have called tcp_output for the sending of
10311 * the SYN, so there will be no map. All
10312 * other cases should probably be a panic.
10314 return;
10316 if (tp->t_flags & TF_SENTFIN) {
10317 /* if we sent a FIN we often will not have map */
10318 return;
10320 #ifdef INVARIANTS
10321 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u\n",
10323 tp->t_state, th_ack, rack,
10324 tp->snd_una, tp->snd_max);
10325 #endif
10326 return;
10328 if (SEQ_LT(th_ack, rsm->r_start)) {
10329 /* Huh map is missing this */
10330 #ifdef INVARIANTS
10331 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n",
10332 rsm->r_start,
10333 th_ack, tp->t_state, rack->r_state);
10334 #endif
10335 return;
10337 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack);
10339 /* Now was it a retransmitted TLP? */
10340 if ((rsm->r_flags & RACK_TLP) &&
10341 (rsm->r_rtr_cnt > 1)) {
10343 * Yes, this rsm was a TLP and retransmitted, remember that
10344 * since if a DSACK comes back on this we don't want
10345 * to think of it as a reordered segment. This may
10346 * get updated again with possibly even other TLPs
10347 * in flight, but that's ok. Only when we don't send
10348 * a retransmitted TLP for 1/2 the sequences space
10349 * will it get turned off (above).
10351 if (rack->rc_last_tlp_acked_set &&
10352 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
10354 * We already turned this on since the end matches,
10355 * the previous one was partially acked, now we
10356 * are getting another one (maybe all of it).
10358 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
10360 * Let's make sure we have all of it though.
10362 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
10363 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
10364 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
10365 rack->r_ctl.last_tlp_acked_end);
10367 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
10368 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
10369 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
10370 rack->r_ctl.last_tlp_acked_end);
10372 } else {
10373 rack->rc_last_tlp_past_cumack = 1;
10374 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
10375 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
10376 rack->rc_last_tlp_acked_set = 1;
10377 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
10380 /* Now do we consume the whole thing? */
10381 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
10382 if (SEQ_GEQ(th_ack, rsm->r_end)) {
10383 /* Its all consumed. */
10384 uint32_t left;
10385 uint8_t newly_acked;
10387 if (rsm->r_flags & RACK_WAS_LOST) {
10389 * This can happen when we marked it as lost
10390 * and yet before retransmitting we get an ack
10391 * which can happen due to reordering.
10393 rsm->r_flags &= ~RACK_WAS_LOST;
10394 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)),
10395 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
10396 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start))
10397 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start;
10398 else
10399 rack->r_ctl.rc_considered_lost = 0;
10401 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__);
10402 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
10403 rsm->r_rtr_bytes = 0;
10405 * Record the time of highest cumack sent if its in our measurement
10406 * window and possibly bump out the end.
10408 rack_rsm_sender_update(rack, tp, rsm, 4);
10409 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK);
10410 if (rsm->r_in_tmap) {
10411 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
10412 rsm->r_in_tmap = 0;
10414 newly_acked = 1;
10415 if (rsm->r_flags & RACK_ACKED) {
10417 * It was acked on the scoreboard -- remove
10418 * it from total
10420 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
10421 newly_acked = 0;
10422 } else if (rsm->r_flags & RACK_SACK_PASSED) {
10424 * There are segments ACKED on the
10425 * scoreboard further up. We are seeing
10426 * reordering.
10428 rsm->r_flags &= ~RACK_SACK_PASSED;
10429 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
10430 rsm->r_flags |= RACK_ACKED;
10431 rack->r_ctl.rc_reorder_ts = cts;
10432 if (rack->r_ctl.rc_reorder_ts == 0)
10433 rack->r_ctl.rc_reorder_ts = 1;
10434 if (rack->r_ent_rec_ns) {
10436 * We have sent no more, and we saw an sack
10437 * then ack arrive.
10439 rack->r_might_revert = 1;
10441 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end);
10442 } else {
10443 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end);
10445 if ((rsm->r_flags & RACK_TO_REXT) &&
10446 (tp->t_flags & TF_RCVD_TSTMP) &&
10447 (to->to_flags & TOF_TS) &&
10448 (to->to_tsecr != 0) &&
10449 (tp->t_flags & TF_PREVVALID)) {
10451 * We can use the timestamp to see
10452 * if this retransmission was from the
10453 * first transmit. If so we made a mistake.
10455 tp->t_flags &= ~TF_PREVVALID;
10456 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) {
10457 /* The first transmit is what this ack is for */
10458 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__);
10461 left = th_ack - rsm->r_end;
10462 if (rack->app_limited_needs_set && newly_acked)
10463 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK);
10464 /* Free back to zone */
10465 rack_free(rack, rsm);
10466 if (left) {
10467 goto more;
10469 /* Check for reneging */
10470 rsm = tqhash_min(rack->r_ctl.tqh);
10471 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
10473 * The peer has moved snd_una up to
10474 * the edge of this send, i.e. one
10475 * that it had previously acked. The only
10476 * way that can be true is if the peer threw
10477 * away data (space issues) that it had
10478 * previously sacked (else it would have
10479 * given us snd_una up to rsm->r_end).
10480 * We need to undo the acked markings here.
10482 * Note we have to look to make sure th_ack is
10483 * our rsm->r_start in case we get an old ack
10484 * where th_ack is behind snd_una.
10486 rack_peer_reneges(rack, rsm, th_ack);
10488 return;
10490 if (rsm->r_flags & RACK_ACKED) {
10492 * It was acked on the scoreboard -- remove it from
10493 * total for the part being cum-acked.
10495 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
10496 } else {
10497 rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack);
10499 /* And what about the lost flag? */
10500 if (rsm->r_flags & RACK_WAS_LOST) {
10502 * This can happen when we marked it as lost
10503 * and yet before retransmitting we get an ack
10504 * which can happen due to reordering. In this
10505 * case its only a partial ack of the send.
10507 KASSERT((rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)),
10508 ("rsm:%p rack:%p rc_considered_lost goes negative th_ack:%u", rsm, rack, th_ack));
10509 if (rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start))
10510 rack->r_ctl.rc_considered_lost -= th_ack - rsm->r_start;
10511 else
10512 rack->r_ctl.rc_considered_lost = 0;
10515 * Clear the dup ack count for
10516 * the piece that remains.
10518 rsm->r_dupack = 0;
10519 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
10520 if (rsm->r_rtr_bytes) {
10522 * It was retransmitted adjust the
10523 * sack holes for what was acked.
10525 int ack_am;
10527 ack_am = (th_ack - rsm->r_start);
10528 if (ack_am >= rsm->r_rtr_bytes) {
10529 rack->r_ctl.rc_holes_rxt -= ack_am;
10530 rsm->r_rtr_bytes -= ack_am;
10534 * Update where the piece starts and record
10535 * the time of send of highest cumack sent if
10536 * its in our GP range.
10538 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__);
10539 /* Now we need to move our offset forward too */
10540 if (rsm->m &&
10541 ((rsm->orig_m_len != rsm->m->m_len) ||
10542 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) {
10543 /* Fix up the orig_m_len and possibly the mbuf offset */
10544 rack_adjust_orig_mlen(rsm);
10546 rsm->soff += (th_ack - rsm->r_start);
10547 rack_rsm_sender_update(rack, tp, rsm, 5);
10548 /* The trim will move th_ack into r_start for us */
10549 tqhash_trim(rack->r_ctl.tqh, th_ack);
10550 /* Now do we need to move the mbuf fwd too? */
10552 struct mbuf *m;
10553 uint32_t soff;
10555 m = rsm->m;
10556 soff = rsm->soff;
10557 if (m) {
10558 while (soff >= m->m_len) {
10559 soff -= m->m_len;
10560 KASSERT((m->m_next != NULL),
10561 (" rsm:%p off:%u soff:%u m:%p",
10562 rsm, rsm->soff, soff, m));
10563 m = m->m_next;
10564 if (m == NULL) {
10566 * This is a fall-back that prevents a panic. In reality
10567 * we should be able to walk the mbufs and find our place.
10568 * At this point snd_una has not been updated with the sbcut() yet
10569 * but tqhash_trim did update rsm->r_start so the offset calculation
10570 * should work fine. This is undesirable since we will take cache
10571 * hits to access the socket buffer. And even more puzzling is that
10572 * it happens occasionally. It should not :(
10574 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
10575 (rsm->r_start - tp->snd_una),
10576 &soff);
10577 break;
10581 * Now save in our updated values.
10583 rsm->m = m;
10584 rsm->soff = soff;
10585 rsm->orig_m_len = rsm->m->m_len;
10586 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
10589 if (rack->app_limited_needs_set &&
10590 SEQ_GEQ(th_ack, tp->gput_seq))
10591 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG);
10594 static void
10595 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack)
10597 struct rack_sendmap *rsm;
10598 int sack_pass_fnd = 0;
10600 if (rack->r_might_revert) {
10602 * Ok we have reordering, have not sent anything, we
10603 * might want to revert the congestion state if nothing
10604 * further has SACK_PASSED on it. Lets check.
10606 * We also get here when we have DSACKs come in for
10607 * all the data that we FR'd. Note that a rxt or tlp
10608 * timer clears this from happening.
10611 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
10612 if (rsm->r_flags & RACK_SACK_PASSED) {
10613 sack_pass_fnd = 1;
10614 break;
10617 if (sack_pass_fnd == 0) {
10619 * We went into recovery
10620 * incorrectly due to reordering!
10622 int orig_cwnd;
10624 rack->r_ent_rec_ns = 0;
10625 orig_cwnd = tp->snd_cwnd;
10626 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec;
10627 tp->snd_recover = tp->snd_una;
10628 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__);
10629 if (IN_RECOVERY(tp->t_flags)) {
10630 rack_exit_recovery(tp, rack, 3);
10631 if ((rack->rto_from_rec == 1) && (rack_ssthresh_rest_rto_rec != 0) ){
10633 * We were in recovery, had an RTO
10634 * and then re-entered recovery (more sack's arrived)
10635 * and we have properly recorded the old ssthresh from
10636 * the first recovery. We want to be able to slow-start
10637 * back to this level. The ssthresh from the timeout
10638 * and then back into recovery will end up most likely
10639 * to be min(cwnd=1mss, 2mss). Which makes it basically
10640 * so we get no slow-start after our RTO.
10642 rack->rto_from_rec = 0;
10643 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh)
10644 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh;
10648 rack->r_might_revert = 0;
10653 static int
10654 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end)
10657 uint32_t am, l_end;
10658 int was_tlp = 0;
10660 if (SEQ_GT(end, start))
10661 am = end - start;
10662 else
10663 am = 0;
10664 if ((rack->rc_last_tlp_acked_set ) &&
10665 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) &&
10666 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) {
10668 * The DSACK is because of a TLP which we don't
10669 * do anything with the reordering window over since
10670 * it was not reordering that caused the DSACK but
10671 * our previous retransmit TLP.
10673 rack_log_dsack_event(rack, 7, __LINE__, start, end);
10674 was_tlp = 1;
10675 goto skip_dsack_round;
10677 if (rack->rc_last_sent_tlp_seq_valid) {
10678 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len;
10679 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) &&
10680 (SEQ_LEQ(end, l_end))) {
10682 * This dsack is from the last sent TLP, ignore it
10683 * for reordering purposes.
10685 rack_log_dsack_event(rack, 7, __LINE__, start, end);
10686 was_tlp = 1;
10687 goto skip_dsack_round;
10690 if (rack->rc_dsack_round_seen == 0) {
10691 rack->rc_dsack_round_seen = 1;
10692 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max;
10693 rack->r_ctl.num_dsack++;
10694 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */
10695 rack_log_dsack_event(rack, 2, __LINE__, 0, 0);
10697 skip_dsack_round:
10699 * We keep track of how many DSACK blocks we get
10700 * after a recovery incident.
10702 rack->r_ctl.dsack_byte_cnt += am;
10703 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
10704 rack->r_ctl.retran_during_recovery &&
10705 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) {
10707 * False recovery most likely culprit is reordering. If
10708 * nothing else is missing we need to revert.
10710 rack->r_might_revert = 1;
10711 rack_handle_might_revert(rack->rc_tp, rack);
10712 rack->r_might_revert = 0;
10713 rack->r_ctl.retran_during_recovery = 0;
10714 rack->r_ctl.dsack_byte_cnt = 0;
10716 return (was_tlp);
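/*
 * The helper below is a rough "pipe" estimate in the PRR sense:
 * for example, with (snd_max - snd_una) = 100000 bytes
 * outstanding, 30000 bytes SACKed, 10000 bytes considered lost
 * and 5000 bytes of retransmitted holes it yields
 * (100000 - (30000 + 10000)) + 5000 = 65000 bytes believed to
 * still be in flight.
 */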
10719 static uint32_t
10720 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una)
10722 return (((tp->snd_max - snd_una) -
10723 (rack->r_ctl.rc_sacked + rack->r_ctl.rc_considered_lost)) + rack->r_ctl.rc_holes_rxt);
10726 static int32_t
10727 rack_compute_pipe(struct tcpcb *tp)
10729 return ((int32_t)do_rack_compute_pipe(tp,
10730 (struct tcp_rack *)tp->t_fb_ptr,
10731 tp->snd_una));
10734 static void
10735 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack)
10737 /* Deal with changed and PRR here (in recovery only) */
10738 uint32_t pipe, snd_una;
10740 rack->r_ctl.rc_prr_delivered += changed;
10742 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) {
10744 * It is all outstanding, we are application limited
10745 * and thus we don't need more room to send anything.
10746 * Note we use tp->snd_una here and not th_ack because
10747 * the data as yet not been cut from the sb.
10749 rack->r_ctl.rc_prr_sndcnt = 0;
10750 return;
10752 /* Compute prr_sndcnt */
10753 if (SEQ_GT(tp->snd_una, th_ack)) {
10754 snd_una = tp->snd_una;
10755 } else {
10756 snd_una = th_ack;
10758 pipe = do_rack_compute_pipe(tp, rack, snd_una);
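/*
 * Illustration of the two regimes below (in the spirit of PRR,
 * RFC 6937): while pipe is above ssthresh the send allowance is
 * scaled by delivered data, e.g. ssthresh = 14600, prr_delivered
 * = 5840 and RecoverFS (rc_prr_recovery_fs) = 29200 bytes gives
 * sndcnt = (5840 * 14600) / 29200 = 2920 bytes (plus one byte of
 * rounding), less what was already sent (rc_prr_out). Once pipe
 * drops below ssthresh the limited-transmit style bound in the
 * else clause takes over.
 */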
10759 if (pipe > tp->snd_ssthresh) {
10760 long sndcnt;
10762 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh;
10763 if (rack->r_ctl.rc_prr_recovery_fs > 0)
10764 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs;
10765 else {
10766 rack->r_ctl.rc_prr_sndcnt = 0;
10767 rack_log_to_prr(rack, 9, 0, __LINE__);
10768 sndcnt = 0;
10770 sndcnt++;
10771 if (sndcnt > (long)rack->r_ctl.rc_prr_out)
10772 sndcnt -= rack->r_ctl.rc_prr_out;
10773 else
10774 sndcnt = 0;
10775 rack->r_ctl.rc_prr_sndcnt = sndcnt;
10776 rack_log_to_prr(rack, 10, 0, __LINE__);
10777 } else {
10778 uint32_t limit;
10780 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out)
10781 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out);
10782 else
10783 limit = 0;
10784 if (changed > limit)
10785 limit = changed;
10786 limit += ctf_fixed_maxseg(tp);
10787 if (tp->snd_ssthresh > pipe) {
10788 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit);
10789 rack_log_to_prr(rack, 11, 0, __LINE__);
10790 } else {
10791 rack->r_ctl.rc_prr_sndcnt = min(0, limit);
10792 rack_log_to_prr(rack, 12, 0, __LINE__);
10797 static void
10798 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck,
10799 int *dsack_seen, int *sacks_seen)
10801 uint32_t changed;
10802 struct tcp_rack *rack;
10803 struct rack_sendmap *rsm;
10804 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1];
10805 register uint32_t th_ack;
10806 int32_t i, j, k, num_sack_blks = 0;
10807 uint32_t cts, acked, ack_point;
10808 int loop_start = 0;
10809 uint32_t tsused;
10810 uint32_t segsiz;
10813 INP_WLOCK_ASSERT(tptoinpcb(tp));
10814 if (tcp_get_flags(th) & TH_RST) {
10815 /* We don't log resets */
10816 return;
10818 rack = (struct tcp_rack *)tp->t_fb_ptr;
10819 cts = tcp_get_usecs(NULL);
10820 rsm = tqhash_min(rack->r_ctl.tqh);
10821 changed = 0;
10822 th_ack = th->th_ack;
10823 segsiz = ctf_fixed_maxseg(rack->rc_tp);
10824 if (BYTES_THIS_ACK(tp, th) >= segsiz) {
10826 * You only get credit for
10827 * MSS and greater (and you get extra
10828 * credit for larger cum-ack moves).
10830 int ac;
10832 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp);
10833 counter_u64_add(rack_ack_total, ac);
10835 if (SEQ_GT(th_ack, tp->snd_una)) {
10836 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__);
10837 tp->t_acktime = ticks;
10839 if (rsm && SEQ_GT(th_ack, rsm->r_start))
10840 changed = th_ack - rsm->r_start;
10841 if (changed) {
10842 rack_process_to_cumack(tp, rack, th_ack, cts, to,
10843 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time));
10845 if ((to->to_flags & TOF_SACK) == 0) {
10846 /* We are done nothing left and no sack. */
10847 rack_handle_might_revert(tp, rack);
10849 * For cases where we struck a dup-ack
10850 * with no SACK, add to the changes so
10851 * PRR will work right.
10853 if (dup_ack_struck && (changed == 0)) {
10854 changed += ctf_fixed_maxseg(rack->rc_tp);
10856 goto out;
10858 /* Sack block processing */
10859 if (SEQ_GT(th_ack, tp->snd_una))
10860 ack_point = th_ack;
10861 else
10862 ack_point = tp->snd_una;
10863 for (i = 0; i < to->to_nsacks; i++) {
10864 bcopy((to->to_sacks + i * TCPOLEN_SACK),
10865 &sack, sizeof(sack));
10866 sack.start = ntohl(sack.start);
10867 sack.end = ntohl(sack.end);
10868 if (SEQ_GT(sack.end, sack.start) &&
10869 SEQ_GT(sack.start, ack_point) &&
10870 SEQ_LT(sack.start, tp->snd_max) &&
10871 SEQ_GT(sack.end, ack_point) &&
10872 SEQ_LEQ(sack.end, tp->snd_max)) {
10873 sack_blocks[num_sack_blks] = sack;
10874 num_sack_blks++;
10875 } else if (SEQ_LEQ(sack.start, th_ack) &&
10876 SEQ_LEQ(sack.end, th_ack)) {
10877 int was_tlp;
10879 if (dsack_seen != NULL)
10880 *dsack_seen = 1;
10881 was_tlp = rack_note_dsack(rack, sack.start, sack.end);
10883 * Its a D-SACK block.
10885 tcp_record_dsack(tp, sack.start, sack.end, was_tlp);
10888 if (rack->rc_dsack_round_seen) {
10889 /* Is the dsack round over? */
10890 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) {
10891 /* Yes it is */
10892 rack->rc_dsack_round_seen = 0;
10893 rack_log_dsack_event(rack, 3, __LINE__, 0, 0);
10897 * Sort the SACK blocks so we can update the rack scoreboard with
10898 * just one pass.
10900 num_sack_blks = sack_filter_blks(tp, &rack->r_ctl.rack_sf, sack_blocks,
10901 num_sack_blks, th->th_ack);
10902 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks);
10903 if (sacks_seen != NULL)
10904 *sacks_seen = num_sack_blks;
10905 if (num_sack_blks == 0) {
10906 /* Nothing to sack, but we need to update counts */
10907 goto out_with_totals;
10909 /* Its a sack of some sort */
10910 if (num_sack_blks < 2) {
10911 /* Only one, we don't need to sort */
10912 goto do_sack_work;
10914 /* Sort the sacks */
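/*
 * A simple O(n^2) exchange sort is plenty here; num_sack_blks is
 * bounded by the size of sack_blocks[] (TCP_MAX_SACK + 1), so we
 * are ordering at most a handful of blocks by their end sequence.
 */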
10915 for (i = 0; i < num_sack_blks; i++) {
10916 for (j = i + 1; j < num_sack_blks; j++) {
10917 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
10918 sack = sack_blocks[i];
10919 sack_blocks[i] = sack_blocks[j];
10920 sack_blocks[j] = sack;
10925 * Now are any of the sack block ends the same (yes some
10926 * implementations send these)?
10928 again:
10929 if (num_sack_blks == 0)
10930 goto out_with_totals;
10931 if (num_sack_blks > 1) {
10932 for (i = 0; i < num_sack_blks; i++) {
10933 for (j = i + 1; j < num_sack_blks; j++) {
10934 if (sack_blocks[i].end == sack_blocks[j].end) {
10936 * Ok these two have the same end we
10937 * want the smallest end and then
10938 * throw away the larger and start
10939 * again.
10941 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) {
10943 * The second block covers
10944 * more area use that
10946 sack_blocks[i].start = sack_blocks[j].start;
10949 * Now collapse out the dup-sack and
10950 * lower the count
10952 for (k = (j + 1); k < num_sack_blks; k++) {
10953 sack_blocks[j].start = sack_blocks[k].start;
10954 sack_blocks[j].end = sack_blocks[k].end;
10955 j++;
10957 num_sack_blks--;
10958 goto again;
10963 do_sack_work:
10965 * First let's look to see if
10966 * we have retransmitted and
10967 * can use the transmit next?
10969 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
10970 if (rsm &&
10971 SEQ_GT(sack_blocks[0].end, rsm->r_start) &&
10972 SEQ_LT(sack_blocks[0].start, rsm->r_end)) {
10974 * We probably did the FR and the next
10975 * SACK in continues as we would expect.
10977 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, segsiz);
10978 if (acked) {
10979 rack->r_wanted_output = 1;
10980 changed += acked;
10982 if (num_sack_blks == 1) {
10984 * This is what we would expect from
10985 * a normal implementation to happen
10986 * after we have retransmitted the FR,
10987 * i.e the sack-filter pushes down
10988 * to 1 block and the next to be retransmitted
10989 * is the sequence in the sack block (has more
10990 * are acked). Count this as ACK'd data to boost
10991 * up the chances of recovering any false positives.
10993 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp)));
10994 counter_u64_add(rack_express_sack, 1);
10995 goto out_with_totals;
10996 } else {
10998 * Start the loop through the
10999 * rest of blocks, past the first block.
11001 loop_start = 1;
11004 counter_u64_add(rack_sack_total, 1);
11005 rsm = rack->r_ctl.rc_sacklast;
11006 for (i = loop_start; i < num_sack_blks; i++) {
11007 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, segsiz);
11008 if (acked) {
11009 rack->r_wanted_output = 1;
11010 changed += acked;
11013 out_with_totals:
11014 if (num_sack_blks > 1) {
11016 * You get an extra stroke if
11017 * you have more than one sack-blk, this
11018 * could be where we are skipping forward
11019 * and the sack-filter is still working, or
11020 * it could be an attacker constantly
11021 * moving us.
11023 counter_u64_add(rack_move_some, 1);
11025 out:
11026 if (changed) {
11027 /* Something changed cancel the rack timer */
11028 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
11030 tsused = tcp_get_usecs(NULL);
11031 rsm = tcp_rack_output(tp, rack, tsused);
11032 if ((!IN_FASTRECOVERY(tp->t_flags)) &&
11033 rsm &&
11034 ((rsm->r_flags & RACK_MUST_RXT) == 0)) {
11035 /* Enter recovery */
11036 entered_recovery = 1;
11037 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__);
11039 * When we enter recovery we need to assure we send
11040 * one packet.
11042 if (rack->rack_no_prr == 0) {
11043 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
11044 rack_log_to_prr(rack, 8, 0, __LINE__);
11046 rack->r_timer_override = 1;
11047 rack->r_early = 0;
11048 rack->r_ctl.rc_agg_early = 0;
11049 } else if (IN_FASTRECOVERY(tp->t_flags) &&
11050 rsm &&
11051 (rack->r_rr_config == 3)) {
11053 * Assure we can output and we get no
11054 * remembered pace time except the retransmit.
11056 rack->r_timer_override = 1;
11057 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
11058 rack->r_ctl.rc_resend = rsm;
11060 if (IN_FASTRECOVERY(tp->t_flags) &&
11061 (rack->rack_no_prr == 0) &&
11062 (entered_recovery == 0)) {
11063 rack_update_prr(tp, rack, changed, th_ack);
11064 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) &&
11065 ((tcp_in_hpts(rack->rc_tp) == 0) &&
11066 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) {
11068 * If you are pacing output you don't want
11069 * to override.
11071 rack->r_early = 0;
11072 rack->r_ctl.rc_agg_early = 0;
11073 rack->r_timer_override = 1;
11078 static void
11079 rack_strike_dupack(struct tcp_rack *rack, tcp_seq th_ack)
11081 struct rack_sendmap *rsm;
11083 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
11084 while (rsm) {
11086 * We need to skip anything already set
11087 * to be retransmitted.
11089 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
11090 (rsm->r_flags & RACK_MUST_RXT)) {
11091 rsm = TAILQ_NEXT(rsm, r_tnext);
11092 continue;
11094 break;
11096 if (rsm && (rsm->r_dupack < 0xff)) {
11097 rsm->r_dupack++;
11098 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) {
11099 struct timeval tv;
11100 uint32_t cts;
11102 * Here we see if we need to retransmit. For
11103 * a SACK type connection if enough time has passed
11104 * we will get a return of the rsm. For a non-sack
11105 * connection we will get the rsm returned if the
11106 * dupack value is 3 or more.
11108 cts = tcp_get_usecs(&tv);
11109 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts);
11110 if (rack->r_ctl.rc_resend != NULL) {
11111 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) {
11112 rack_cong_signal(rack->rc_tp, CC_NDUPACK,
11113 th_ack, __LINE__);
11115 rack->r_wanted_output = 1;
11116 rack->r_timer_override = 1;
11117 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3);
11119 } else {
11120 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3);
11125 static void
11126 rack_check_bottom_drag(struct tcpcb *tp,
11127 struct tcp_rack *rack,
11128 struct socket *so)
11131 * So what is dragging bottom?
11133 * Dragging bottom means you were under pacing and had a
11134 * delay in processing inbound acks waiting on our pacing
11135 * timer to expire. While you were waiting all of the acknowledgments
11136 * for the packets you sent have arrived. This means we are pacing
11137 * way underneath the bottleneck to the point where our Goodput
11138 * measurements stop working, since they require more than one
11139 * ack (usually at least 8 packets worth with multiple acks so we can
11140 * gauge the inter-ack times). If that occurs we have a real problem
11141 * since we are stuck in a hole that we can't get out of without
11142 * something speeding us up.
11144 * We also check to see if we are whittling down to just one segment
11145 * outstanding. If this occurs and we have room to send in our cwnd/rwnd
11146 * then we are adding the delayed ack interval into our measurements and
11147 * we need to speed up slightly.
11149 uint32_t segsiz, minseg;
11151 segsiz = ctf_fixed_maxseg(tp);
11152 minseg = segsiz;
11153 if (tp->snd_max == tp->snd_una) {
11155 * We are doing dynamic pacing and we are way
11156 * under. Basically everything got acked while
11157 * we were still waiting on the pacer to expire.
11159 * This means we need to boost the b/w in
11160 * addition to any earlier boosting of
11161 * the multiplier.
11163 uint64_t lt_bw;
11165 tcp_trace_point(rack->rc_tp, TCP_TP_PACED_BOTTOM);
11166 lt_bw = rack_get_lt_bw(rack);
11167 rack->rc_dragged_bottom = 1;
11168 rack_validate_multipliers_at_or_above100(rack);
11169 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) &&
11170 (rack->dis_lt_bw == 0) &&
11171 (rack->use_lesser_lt_bw == 0) &&
11172 (lt_bw > 0)) {
11174 * Let's use the long-term b/w we have
11175 * been getting as a base.
11177 if (rack->rc_gp_filled == 0) {
11178 if (lt_bw > ONE_POINT_TWO_MEG) {
11180 * If we have no measurement
11181 * don't let us set in more than
11182 * 1.2Mbps. If we are still too
11183 * low after pacing with this we
11184 * will hopefully have a max b/w
11185 * available to sanity check things.
11187 lt_bw = ONE_POINT_TWO_MEG;
11189 rack->r_ctl.rc_rtt_diff = 0;
11190 rack->r_ctl.gp_bw = lt_bw;
11191 rack->rc_gp_filled = 1;
11192 if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
11193 rack->r_ctl.num_measurements = RACK_REQ_AVG;
11194 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
11195 } else if (lt_bw > rack->r_ctl.gp_bw) {
11196 rack->r_ctl.rc_rtt_diff = 0;
11197 if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
11198 rack->r_ctl.num_measurements = RACK_REQ_AVG;
11199 rack->r_ctl.gp_bw = lt_bw;
11200 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
11201 } else
11202 rack_increase_bw_mul(rack, -1, 0, 0, 1);
11203 if ((rack->gp_ready == 0) &&
11204 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
11205 /* We have enough measurements now */
11206 rack->gp_ready = 1;
11207 if (rack->dgp_on ||
11208 rack->rack_hibeta)
11209 rack_set_cc_pacing(rack);
11210 if (rack->defer_options)
11211 rack_apply_deferred_options(rack);
11213 } else {
11215 * zero rtt possibly? Settle for just an old increase.
11217 rack_increase_bw_mul(rack, -1, 0, 0, 1);
11219 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
11220 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)),
11221 minseg)) &&
11222 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) &&
11223 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) &&
11224 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <=
11225 (segsiz * rack_req_segs))) {
11227 * We are doing dynamic GP pacing and
11228 * we have everything except 1MSS or less
11229 * bytes left out. We are still pacing away.
11230 * And there is data that could be sent, This
11231 * means we are inserting delayed ack time in
11232 * our measurements because we are pacing too slow.
11234 rack_validate_multipliers_at_or_above100(rack);
11235 rack->rc_dragged_bottom = 1;
11236 rack_increase_bw_mul(rack, -1, 0, 0, 1);
11240 #ifdef TCP_REQUEST_TRK
11241 static void
11242 rack_log_hybrid(struct tcp_rack *rack, uint32_t seq,
11243 struct tcp_sendfile_track *cur, uint8_t mod, int line, int err)
11245 int do_log;
11247 do_log = tcp_bblogging_on(rack->rc_tp);
11248 if (do_log == 0) {
11249 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) == 0)
11250 return;
11251 /* We only allow the three below with point logging on */
11252 if ((mod != HYBRID_LOG_RULES_APP) &&
11253 (mod != HYBRID_LOG_RULES_SET) &&
11254 (mod != HYBRID_LOG_REQ_COMP))
11255 return;
11258 if (do_log) {
11259 union tcp_log_stackspecific log;
11260 struct timeval tv;
11262 /* Convert our ms to a microsecond */
11263 memset(&log, 0, sizeof(log));
11264 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
11265 log.u_bbr.flex1 = seq;
11266 log.u_bbr.cwnd_gain = line;
11267 if (cur != NULL) {
11268 uint64_t off;
11270 log.u_bbr.flex2 = cur->start_seq;
11271 log.u_bbr.flex3 = cur->end_seq;
11272 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff);
11273 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff);
11274 log.u_bbr.flex6 = cur->flags;
11275 log.u_bbr.pkts_out = cur->hybrid_flags;
11276 log.u_bbr.rttProp = cur->timestamp;
11277 log.u_bbr.cur_del_rate = cur->cspr;
11278 log.u_bbr.bw_inuse = cur->start;
11279 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff);
11280 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff);
11281 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff);
11282 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff);
11283 log.u_bbr.inhpts = 1;
11284 #ifdef TCP_REQUEST_TRK
11285 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]);
11286 log.u_bbr.use_lt_bw = (uint8_t)(off / sizeof(struct tcp_sendfile_track));
11287 #endif
11288 } else {
11289 log.u_bbr.flex2 = err;
11292 * Fill in flex7 to be CHD (catchup|hybrid|DGP)
11294 log.u_bbr.flex7 = rack->rc_catch_up;
11295 log.u_bbr.flex7 <<= 1;
11296 log.u_bbr.flex7 |= rack->rc_hybrid_mode;
11297 log.u_bbr.flex7 <<= 1;
11298 log.u_bbr.flex7 |= rack->dgp_on;
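/*
 * For example, catch-up on, hybrid mode on and DGP off encodes
 * above as ((1 << 1 | 1) << 1) | 0 = 6 (binary 110) in flex7.
 */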
11300 * Compose bbr_state to be a bit wise 0000ADHF
11301 * where A is the always_pace flag
11302 * where D is the dgp_on flag
11303 * where H is the hybrid_mode on flag
11304 * where F is the use_fixed_rate flag.
11306 log.u_bbr.bbr_state = rack->rc_always_pace;
11307 log.u_bbr.bbr_state <<= 1;
11308 log.u_bbr.bbr_state |= rack->dgp_on;
11309 log.u_bbr.bbr_state <<= 1;
11310 log.u_bbr.bbr_state |= rack->rc_hybrid_mode;
11311 log.u_bbr.bbr_state <<= 1;
11312 log.u_bbr.bbr_state |= rack->use_fixed_rate;
11313 log.u_bbr.flex8 = mod;
11314 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap;
11315 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg;
11316 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
11317 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start;
11318 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error;
11319 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop;
11320 tcp_log_event(rack->rc_tp, NULL,
11321 &rack->rc_inp->inp_socket->so_rcv,
11322 &rack->rc_inp->inp_socket->so_snd,
11323 TCP_HYBRID_PACING_LOG, 0,
11324 0, &log, false, NULL, __func__, __LINE__, &tv);
11327 #endif
11329 #ifdef TCP_REQUEST_TRK
11330 static void
11331 rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts)
11333 struct tcp_sendfile_track *rc_cur, *orig_ent;
11334 struct tcpcb *tp;
11335 int err = 0;
11337 orig_ent = rack->r_ctl.rc_last_sft;
11338 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq);
11339 if (rc_cur == NULL) {
11340 /* If not in the beginning what about the end piece */
11341 if (rack->rc_hybrid_mode)
11342 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err);
11343 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1));
11344 } else {
11345 err = 12345;
11347 /* If we find no parameters we are in straight DGP mode */
11348 if (rc_cur == NULL) {
11349 /* None found for this seq, just DGP for now */
11350 if (rack->rc_hybrid_mode) {
11351 rack->r_ctl.client_suggested_maxseg = 0;
11352 rack->rc_catch_up = 0;
11353 if (rack->cspr_is_fcc == 0)
11354 rack->r_ctl.bw_rate_cap = 0;
11355 else
11356 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap;
11358 if (rack->rc_hybrid_mode) {
11359 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err);
11361 if (rack->r_ctl.rc_last_sft) {
11362 rack->r_ctl.rc_last_sft = NULL;
11364 return;
11366 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) {
11367 /* This entry was never setup for hybrid pacing on/off etc */
11368 if (rack->rc_hybrid_mode) {
11369 rack->r_ctl.client_suggested_maxseg = 0;
11370 rack->rc_catch_up = 0;
11371 rack->r_ctl.bw_rate_cap = 0;
11373 if (rack->r_ctl.rc_last_sft) {
11374 rack->r_ctl.rc_last_sft = NULL;
11376 if ((rc_cur->flags & TCP_TRK_TRACK_FLG_FSND) == 0) {
11377 rc_cur->flags |= TCP_TRK_TRACK_FLG_FSND;
11378 rc_cur->first_send = cts;
11379 rc_cur->sent_at_fs = rack->rc_tp->t_sndbytes;
11380 rc_cur->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes;
11382 return;
11385 * Ok if we have a new entry *or* have never
11386 * set up an entry we need to proceed. If
11387 * we have already set it up this entry we
11388 * just continue along with what we already
11389 * setup.
11391 tp = rack->rc_tp;
11392 if ((rack->r_ctl.rc_last_sft != NULL) &&
11393 (rack->r_ctl.rc_last_sft == rc_cur)) {
11394 /* Its already in place */
11395 if (rack->rc_hybrid_mode)
11396 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_ISSAME, __LINE__, 0);
11397 return;
11399 if (rack->rc_hybrid_mode == 0) {
11400 rack->r_ctl.rc_last_sft = rc_cur;
11401 if (orig_ent) {
11402 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes;
11403 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes;
11404 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND;
11406 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0);
11407 return;
11409 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr) {
11410 /* Compensate for all the header overheads */
11411 if (rack->cspr_is_fcc == 0)
11412 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr);
11413 else
11414 rack->r_ctl.fillcw_cap = rack_compensate_for_linerate(rack, rc_cur->cspr);
11415 } else {
11416 if (rack->rc_hybrid_mode) {
11417 if (rack->cspr_is_fcc == 0)
11418 rack->r_ctl.bw_rate_cap = 0;
11419 else
11420 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap;
11423 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS)
11424 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg;
11425 else
11426 rack->r_ctl.client_suggested_maxseg = 0;
11427 if (rc_cur->timestamp == rack->r_ctl.last_tm_mark) {
11429 * It is the same timestamp as the previous one
11430 * add the hybrid flag that will indicate we use
11431 * sendtime not arrival time for catch-up mode.
11433 rc_cur->hybrid_flags |= TCP_HYBRID_PACING_SENDTIME;
11435 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) &&
11436 (rc_cur->cspr > 0)) {
11437 uint64_t len;
11439 rack->rc_catch_up = 1;
11441 * Calculate the deadline time, first set the
11442 * time to when the request arrived.
11444 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_SENDTIME) {
11446 * For cases where it's a duplicate tm (we received more
11447 * than one request for a tm) we want to use now, the point
11448 * where we are just sending the first bit of the request.
11450 rc_cur->deadline = cts;
11451 } else {
11453 * Here we have a different tm from the last request
11454 * so we want to use arrival time as our base.
11456 rc_cur->deadline = rc_cur->localtime;
11459 * Next calculate the length and compensate for
11460 * TLS if need be.
11462 len = rc_cur->end - rc_cur->start;
11463 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) {
11465 * This session is doing TLS. Take a swag guess
11466 * at the overhead.
11468 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len);
11471 * Now considering the size, and the cspr, what is the time that
11472 * would be required at the cspr rate. Here we use the raw
11473 * cspr value since the client only looks at the raw data. We
11474 * do use len which includes TLS overhead, but not the TCP/IP etc.
11475 * That will get made up for in the CU pacing rate set.
11477 len *= HPTS_USEC_IN_SEC;
11478 len /= rc_cur->cspr;
11479 rc_cur->deadline += len;
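/*
 * Rough example, assuming cspr is expressed in bytes per second
 * (which the usec arithmetic above implies): a 1000000 byte
 * request at cspr = 8000000 adds 1000000 * HPTS_USEC_IN_SEC /
 * 8000000 = 125000 usec (125 ms) to the catch-up deadline.
 */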
11480 } else {
11481 rack->rc_catch_up = 0;
11482 rc_cur->deadline = 0;
11484 if (rack->r_ctl.client_suggested_maxseg != 0) {
11486 * We need to reset the max pace segs if we have a
11487 * client_suggested_maxseg.
11489 rack_set_pace_segments(tp, rack, __LINE__, NULL);
11491 if (orig_ent) {
11492 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes;
11493 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes;
11494 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND;
11496 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0);
11497 /* Remember it for next time and for CU mode */
11498 rack->r_ctl.rc_last_sft = rc_cur;
11499 rack->r_ctl.last_tm_mark = rc_cur->timestamp;
11501 #endif
11503 static void
11504 rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts)
11506 #ifdef TCP_REQUEST_TRK
11507 struct tcp_sendfile_track *ent;
11509 ent = rack->r_ctl.rc_last_sft;
11510 if ((ent == NULL) ||
11511 (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) ||
11512 (SEQ_GEQ(seq, ent->end_seq))) {
11513 /* Time to update the track. */
11514 rack_set_dgp_hybrid_mode(rack, seq, len, cts);
11515 ent = rack->r_ctl.rc_last_sft;
11517 /* Out of all */
11518 if (ent == NULL) {
11519 return;
11521 if (SEQ_LT(ent->end_seq, (seq + len))) {
11523 * This is the case where our end_seq guess
11524 * was wrong. This is usually due to TLS having
11525 * more bytes than our guess. It could also be the
11526 * case that the client sent in two requests closely
11527 * and the SB is full of both so we are sending part
11528 * of each (end|beg). In such a case let's move this
11529 * guys end to match the end of this send. That
11530 * way it will complete when all of it is acked.
11532 ent->end_seq = (seq + len);
11533 if (rack->rc_hybrid_mode)
11534 rack_log_hybrid_bw(rack, seq, len, 0, 0, HYBRID_LOG_EXTEND, 0, ent, __LINE__);
11536 /* Now validate we have set the send time of this one */
11537 if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) {
11538 ent->flags |= TCP_TRK_TRACK_FLG_FSND;
11539 ent->first_send = cts;
11540 ent->sent_at_fs = rack->rc_tp->t_sndbytes;
11541 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes;
11543 #endif
11546 static void
11547 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount)
11550 * The fast output path is enabled and we
11551 * have moved the cumack forward. Lets see if
11552 * we can expand forward the fast path length by
11553 * that amount. What we would ideally like to
11554 * do is increase the number of bytes in the
11555 * fast path block (left_to_send) by the
11556 * acked amount. However we have to gate that
11557 * by two factors:
11558 * 1) The amount outstanding and the rwnd of the peer
11559 * (i.e. we don't want to exceed the rwnd of the peer).
11560 * <and>
11561 * 2) The amount of data left in the socket buffer (i.e.
11562 * we can't send beyond what is in the buffer).
11564 * Note that this does not take into account any increase
11565 * in the cwnd. We will only extend the fast path by
11566 * what was acked.
11568 uint32_t new_total, gating_val;
11570 new_total = acked_amount + rack->r_ctl.fsb.left_to_send;
11571 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)),
11572 (tp->snd_wnd - (tp->snd_max - tp->snd_una)));
11573 if (new_total <= gating_val) {
11574 /* We can increase left_to_send by the acked amount */
11575 counter_u64_add(rack_extended_rfo, 1);
11576 rack->r_ctl.fsb.left_to_send = new_total;
11577 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))),
11578 ("rack:%p left_to_send:%u sbavail:%u out:%u",
11579 rack, rack->r_ctl.fsb.left_to_send,
11580 sbavail(&rack->rc_inp->inp_socket->so_snd),
11581 (tp->snd_max - tp->snd_una)));
11586 static void
11587 rack_adjust_sendmap_head(struct tcp_rack *rack, struct sockbuf *sb)
11590 * Here any sendmap entry that points to the
11591 * beginning mbuf must be adjusted to the correct
11592 * offset. This must be called with:
11593 * 1) The socket buffer locked
11594 * 2) snd_una adjusted to its new position.
11596 * Note that (2) implies rack_ack_received has also
11597 * been called and all the sbcut's have been done.
11599 * We grab the first mbuf in the socket buffer and
11600 * then go through the front of the sendmap, recalculating
11601 * the stored offset for any sendmap entry that has
11602 * that mbuf. We must use the sb functions to do this
11603  * since it's possible an add was done as well as
11604 * the subtraction we may have just completed. This should
11605 * not be a penalty though, since we just referenced the sb
11606 * to go in and trim off the mbufs that we freed (of course
11607 * there will be a penalty for the sendmap references though).
11609  * Note also that with INVARIANTS on, we validate with a KASSERT
11610 * that the first sendmap entry has a soff of 0.
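 * Rough sketch (hypothetical sizes): if the head mbuf holds 3000 bytes
 * starting at snd_una, one sendmap entry covering [snd_una, snd_una+1448)
 * keeps soff = 0 and the next entry covering the following 1448 bytes
 * keeps soff = 1448, both pointing at that same head mbuf; the loop
 * below recomputes those (m, soff) pairs via sbsndmbuf().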
11613 struct mbuf *m;
11614 struct rack_sendmap *rsm;
11615 tcp_seq snd_una;
11616 #ifdef INVARIANTS
11617 int first_processed = 0;
11618 #endif
11620 snd_una = rack->rc_tp->snd_una;
11621 SOCKBUF_LOCK_ASSERT(sb);
11622 m = sb->sb_mb;
11623 rsm = tqhash_min(rack->r_ctl.tqh);
11624 if ((rsm == NULL) || (m == NULL)) {
11625 /* Nothing outstanding */
11626 return;
11628 /* The very first RSM's mbuf must point to the head mbuf in the sb */
11629 KASSERT((rsm->m == m),
11630 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb",
11631 rack, sb, rsm));
11632 while (rsm->m && (rsm->m == m)) {
11633 /* one to adjust */
11634 #ifdef INVARIANTS
11635 struct mbuf *tm;
11636 uint32_t soff;
11638 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff);
11639 if ((rsm->orig_m_len != m->m_len) ||
11640 (rsm->orig_t_space != M_TRAILINGROOM(m))){
11641 rack_adjust_orig_mlen(rsm);
11643 if (first_processed == 0) {
11644 KASSERT((rsm->soff == 0),
11645 ("Rack:%p rsm:%p -- rsm at head but soff not zero",
11646 rack, rsm));
11647 first_processed = 1;
11649 if ((rsm->soff != soff) || (rsm->m != tm)) {
11651 * This is not a fatal error, we anticipate it
11652 * might happen (the else code), so we count it here
11653 * so that under invariant we can see that it really
11654 * does happen.
11656 counter_u64_add(rack_adjust_map_bw, 1);
11658 rsm->m = tm;
11659 rsm->soff = soff;
11660 if (tm) {
11661 rsm->orig_m_len = rsm->m->m_len;
11662 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
11663 } else {
11664 rsm->orig_m_len = 0;
11665 rsm->orig_t_space = 0;
11667 #else
11668 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff);
11669 if (rsm->m) {
11670 rsm->orig_m_len = rsm->m->m_len;
11671 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
11672 } else {
11673 rsm->orig_m_len = 0;
11674 rsm->orig_t_space = 0;
11676 #endif
11677 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
11678 if (rsm == NULL)
11679 break;
11683 #ifdef TCP_REQUEST_TRK
11684 static inline void
11685 rack_req_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack)
11687 struct tcp_sendfile_track *ent;
11688 int i;
11690 if ((rack->rc_hybrid_mode == 0) &&
11691 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) {
11693  * Just do normal completions; hybrid pacing is not on
11694  * and CLDL is off as well.
11696 tcp_req_check_for_comp(rack->rc_tp, th_ack);
11697 return;
11700 * Originally I was just going to find the th_ack associated
11701  * with an entry. But then I realized a large stretch ACK could
11702 * in theory ack two or more requests at once. So instead we
11703 * need to find all entries that are completed by th_ack not
11704 * just a single entry and do our logging.
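 * Example of why we loop (hypothetical offsets): if two queued requests
 * covering [0, 1MB) and [1MB, 2MB) have both been fully sent, a single
 * stretch ACK at the 2MB point completes both entries, so a single
 * lookup would miss one of them.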
11706 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i);
11707 while (ent != NULL) {
11709 * We may be doing hybrid pacing or CLDL and need more details possibly
11710 * so we do it manually instead of calling
11711 * tcp_req_check_for_comp()
11713 uint64_t laa, tim, data, cbw, ftim;
11715 /* Ok this ack frees it */
11716 rack_log_hybrid(rack, th_ack,
11717 ent, HYBRID_LOG_REQ_COMP, __LINE__, 0);
11718 rack_log_hybrid_sends(rack, ent, __LINE__);
11719 /* calculate the time based on the ack arrival */
11720 data = ent->end - ent->start;
11721 laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time);
11722 if (ent->flags & TCP_TRK_TRACK_FLG_FSND) {
11723 if (ent->first_send > ent->localtime)
11724 ftim = ent->first_send;
11725 else
11726 ftim = ent->localtime;
11727 } else {
11728 /* TSNH */
11729 ftim = ent->localtime;
11731 if (laa > ent->localtime)
11732 tim = laa - ftim;
11733 else
11734 tim = 0;
11735 cbw = data * HPTS_USEC_IN_SEC;
11736 if (tim > 0)
11737 cbw /= tim;
11738 else
11739 cbw = 0;
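/*
 * Worked example (hypothetical numbers): data = 1,000,000 bytes
 * delivered in tim = 500,000 usecs gives
 * cbw = 1,000,000 * HPTS_USEC_IN_SEC / 500,000 = 2,000,000 bytes/sec.
 */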
11740 rack_log_hybrid_bw(rack, th_ack, cbw, tim, data, HYBRID_LOG_BW_MEASURE, 0, ent, __LINE__);
11742 * Check to see if we are freeing what we are pointing to send wise
11743 * if so be sure to NULL the pointer so we know we are no longer
11744 * set to anything.
11746 if (ent == rack->r_ctl.rc_last_sft) {
11747 rack->r_ctl.rc_last_sft = NULL;
11748 if (rack->rc_hybrid_mode) {
11749 rack->rc_catch_up = 0;
11750 if (rack->cspr_is_fcc == 0)
11751 rack->r_ctl.bw_rate_cap = 0;
11752 else
11753 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap;
11754 rack->r_ctl.client_suggested_maxseg = 0;
11757 /* Generate the log that the tcp_netflix call would have */
11758 tcp_req_log_req_info(rack->rc_tp, ent,
11759 i, TCP_TRK_REQ_LOG_FREED, 0, 0);
11760 /* Free it and see if there is another one */
11761 tcp_req_free_a_slot(rack->rc_tp, ent);
11762 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i);
11765 #endif
11769  * Return value of 1: we do not need to call rack_process_data().
11770  * Return value of 0: rack_process_data() can be called.
11771  * For ret_val, if it's 0 the TCP is locked; if it's non-zero
11772  * it's unlocked and probably unsafe to touch the TCB.
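 *
 * A typical caller pattern (see for example rack_do_syn_sent() below):
 *
 *	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen,
 *	    &ourfinisacked, thflags, &ret_val, orig_tlen))
 *		return (ret_val);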
11774 static int
11775 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
11776 struct tcpcb *tp, struct tcpopt *to,
11777 uint32_t tiwin, int32_t tlen,
11778 int32_t * ofia, int32_t thflags, int32_t *ret_val, int32_t orig_tlen)
11780 int32_t ourfinisacked = 0;
11781 int32_t nsegs, acked_amount;
11782 int32_t acked;
11783 struct mbuf *mfree;
11784 struct tcp_rack *rack;
11785 int32_t under_pacing = 0;
11786 int32_t post_recovery = 0;
11787 uint32_t p_cwnd;
11789 INP_WLOCK_ASSERT(tptoinpcb(tp));
11791 rack = (struct tcp_rack *)tp->t_fb_ptr;
11792 if (SEQ_GEQ(tp->snd_una, tp->iss + (65535 << tp->snd_scale))) {
11793 /* Checking SEG.ACK against ISS is definitely redundant. */
11794 tp->t_flags2 |= TF2_NO_ISS_CHECK;
11796 if (!V_tcp_insecure_ack) {
11797 tcp_seq seq_min;
11798 bool ghost_ack_check;
11800 if (tp->t_flags2 & TF2_NO_ISS_CHECK) {
11801 /* Check for too old ACKs (RFC 5961, Section 5.2). */
11802 seq_min = tp->snd_una - tp->max_sndwnd;
11803 ghost_ack_check = false;
11804 } else {
11805 if (SEQ_GT(tp->iss + 1, tp->snd_una - tp->max_sndwnd)) {
11806 /* Checking for ghost ACKs is stricter. */
11807 seq_min = tp->iss + 1;
11808 ghost_ack_check = true;
11809 } else {
11811 * Checking for too old ACKs (RFC 5961,
11812 * Section 5.2) is stricter.
11814 seq_min = tp->snd_una - tp->max_sndwnd;
11815 ghost_ack_check = false;
11818 if (SEQ_LT(th->th_ack, seq_min)) {
11819 if (ghost_ack_check)
11820 TCPSTAT_INC(tcps_rcvghostack);
11821 else
11822 TCPSTAT_INC(tcps_rcvacktooold);
11823 /* Send challenge ACK. */
11824 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val);
11825 rack->r_wanted_output = 1;
11826 return (1);
11829 if (SEQ_GT(th->th_ack, tp->snd_max)) {
11830 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val);
11831 rack->r_wanted_output = 1;
11832 return (1);
11834 if (rack->gp_ready &&
11835 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
11836 under_pacing = 1;
11838 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
11839 int in_rec, dup_ack_struck = 0;
11840 int dsack_seen = 0, sacks_seen = 0;
11842 in_rec = IN_FASTRECOVERY(tp->t_flags);
11843 if (rack->rc_in_persist) {
11844 tp->t_rxtshift = 0;
11845 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
11846 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
11849 if ((th->th_ack == tp->snd_una) &&
11850 (tiwin == tp->snd_wnd) &&
11851 (orig_tlen == 0) &&
11852 ((to->to_flags & TOF_SACK) == 0)) {
11853 rack_strike_dupack(rack, th->th_ack);
11854 dup_ack_struck = 1;
11856 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)),
11857 dup_ack_struck, &dsack_seen, &sacks_seen);
11860 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
11862  * Old ack, behind (or duplicate of) the last one rcv'd.
11863  * Note: We mark that reordering is occurring if it is
11864  * below snd_una and we have not closed our window.
11866 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) {
11867 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
11868 if (rack->r_ctl.rc_reorder_ts == 0)
11869 rack->r_ctl.rc_reorder_ts = 1;
11871 return (0);
11874 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
11875 * something we sent.
11877 if (tp->t_flags & TF_NEEDSYN) {
11879 * T/TCP: Connection was half-synchronized, and our SYN has
11880 * been ACK'd (so connection is now fully synchronized). Go
11881 * to non-starred state, increment snd_una for ACK of SYN,
11882 * and check if we can do window scaling.
11884 tp->t_flags &= ~TF_NEEDSYN;
11885 tp->snd_una++;
11886 /* Do window scaling? */
11887 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
11888 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
11889 tp->rcv_scale = tp->request_r_scale;
11890 /* Send window already scaled. */
11893 nsegs = max(1, m->m_pkthdr.lro_nsegs);
11895 acked = BYTES_THIS_ACK(tp, th);
11896 if (acked) {
11898  * Any time we move the cum-ack forward, clear the
11899  * keep-alive-tied probe-not-answered flag. The
11900  * persist code clears its own on entry.
11902 rack->probe_not_answered = 0;
11904 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
11905 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
11907 * If we just performed our first retransmit, and the ACK arrives
11908 * within our recovery window, then it was a mistake to do the
11909 * retransmit in the first place. Recover our original cwnd and
11910 * ssthresh, and proceed to transmit where we left off.
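 * t_badrxtwin marks (in ticks) the end of the window in which an
 * arriving ACK proves the retransmit spurious; the
 * (int)(ticks - tp->t_badrxtwin) < 0 test below is just the wrap-safe
 * form of "ticks is still before t_badrxtwin".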
11912 if ((tp->t_flags & TF_PREVVALID) &&
11913 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
11914 tp->t_flags &= ~TF_PREVVALID;
11915 if (tp->t_rxtshift == 1 &&
11916 (int)(ticks - tp->t_badrxtwin) < 0)
11917 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__);
11919 if (acked) {
11920 /* assure we are not backed off */
11921 tp->t_rxtshift = 0;
11922 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
11923 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
11924 rack->rc_tlp_in_progress = 0;
11925 rack->r_ctl.rc_tlp_cnt_out = 0;
11927 * If it is the RXT timer we want to
11928 * stop it, so we can restart a TLP.
11930 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
11931 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
11932 #ifdef TCP_REQUEST_TRK
11933 rack_req_check_for_comp(rack, th->th_ack);
11934 #endif
11937 * If we have a timestamp reply, update smoothed round trip time. If
11938 * no timestamp is present but transmit timer is running and timed
11939 * sequence number was acked, update smoothed round trip time. Since
11940 * we now have an rtt measurement, cancel the timer backoff (cf.,
11941 * Phil Karn's retransmit alg.). Recompute the initial retransmit
11942 * timer.
11944 * Some boxes send broken timestamp replies during the SYN+ACK
11945 * phase, ignore timestamps of 0 or we could calculate a huge RTT
11946 * and blow up the retransmit timer.
11949 * If all outstanding data is acked, stop retransmit timer and
11950 * remember to restart (more output or persist). If there is more
11951 * data to be acked, restart retransmit timer, using current
11952 * (possibly backed-off) value.
11954 if (acked == 0) {
11955 if (ofia)
11956 *ofia = ourfinisacked;
11957 return (0);
11959 if (IN_RECOVERY(tp->t_flags)) {
11960 if (SEQ_LT(th->th_ack, tp->snd_recover) &&
11961 (SEQ_LT(th->th_ack, tp->snd_max))) {
11962 tcp_rack_partialack(tp);
11963 } else {
11964 rack_post_recovery(tp, th->th_ack);
11965 post_recovery = 1;
11967  * Grab the segsiz, multiply by 2 and add the snd_cwnd;
11968  * that is the max the CC should grow to if we are exiting
11969  * recovery and doing a late add.
11971 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
11972 p_cwnd <<= 1;
11973 p_cwnd += tp->snd_cwnd;
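/*
 * E.g. (hypothetical): if min(maxseg, pace_min_segs) is 1448, p_cwnd is
 * snd_cwnd + 2896; it is used below to clamp how far the CC module can
 * push snd_cwnd right after exiting recovery.
 */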
11975 } else if ((rack->rto_from_rec == 1) &&
11976 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
11978 * We were in recovery, hit a rxt timeout
11979 * and never re-entered recovery. The timeout(s)
11980 * made up all the lost data. In such a case
11981 * we need to clear the rto_from_rec flag.
11983 rack->rto_from_rec = 0;
11986 * Let the congestion control algorithm update congestion control
11987 * related information. This typically means increasing the
11988 * congestion window.
11990 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, post_recovery);
11991 if (post_recovery &&
11992 (tp->snd_cwnd > p_cwnd)) {
11993  /* Must be non-newreno (cubic) getting too far ahead of itself */
11994 tp->snd_cwnd = p_cwnd;
11996 SOCK_SENDBUF_LOCK(so);
11997 acked_amount = min(acked, (int)sbavail(&so->so_snd));
11998 tp->snd_wnd -= acked_amount;
11999 mfree = sbcut_locked(&so->so_snd, acked_amount);
12000 if ((sbused(&so->so_snd) == 0) &&
12001 (acked > acked_amount) &&
12002 (tp->t_state >= TCPS_FIN_WAIT_1) &&
12003 (tp->t_flags & TF_SENTFIN)) {
12005 * We must be sure our fin
12006 * was sent and acked (we can be
12007 * in FIN_WAIT_1 without having
12008 * sent the fin).
12010 ourfinisacked = 1;
12012 tp->snd_una = th->th_ack;
12013 /* wakeups? */
12014 if (acked_amount && sbavail(&so->so_snd))
12015 rack_adjust_sendmap_head(rack, &so->so_snd);
12016 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
12017 /* NB: sowwakeup_locked() does an implicit unlock. */
12018 sowwakeup_locked(so);
12019 m_freem(mfree);
12020 if (SEQ_GT(tp->snd_una, tp->snd_recover))
12021 tp->snd_recover = tp->snd_una;
12023 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
12024 tp->snd_nxt = tp->snd_max;
12026 if (under_pacing &&
12027 (rack->use_fixed_rate == 0) &&
12028 (rack->in_probe_rtt == 0) &&
12029 rack->rc_gp_dyn_mul &&
12030 rack->rc_always_pace) {
12031 /* Check if we are dragging bottom */
12032 rack_check_bottom_drag(tp, rack, so);
12034 if (tp->snd_una == tp->snd_max) {
12035 /* Nothing left outstanding */
12036 tp->t_flags &= ~TF_PREVVALID;
12037 if (rack->r_ctl.rc_went_idle_time == 0)
12038 rack->r_ctl.rc_went_idle_time = 1;
12039 rack->r_ctl.retran_during_recovery = 0;
12040 rack->r_ctl.dsack_byte_cnt = 0;
12041 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
12042 if (sbavail(&tptosocket(tp)->so_snd) == 0)
12043 tp->t_acktime = 0;
12044 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
12045 rack->rc_suspicious = 0;
12046 /* Set need output so persist might get set */
12047 rack->r_wanted_output = 1;
12048 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
12049 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
12050 (sbavail(&so->so_snd) == 0) &&
12051 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
12053 * The socket was gone and the
12054 * peer sent data (now or in the past), time to
12055 * reset him.
12057 *ret_val = 1;
12058 /* tcp_close will kill the inp pre-log the Reset */
12059 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
12060 tp = tcp_close(tp);
12061 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
12062 return (1);
12065 if (ofia)
12066 *ofia = ourfinisacked;
12067 return (0);
12071 static void
12072 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line,
12073 int dir, uint32_t flags, struct rack_sendmap *rsm)
12075 if (tcp_bblogging_on(rack->rc_tp)) {
12076 union tcp_log_stackspecific log;
12077 struct timeval tv;
12079 memset(&log, 0, sizeof(log));
12080 log.u_bbr.flex1 = cnt;
12081 log.u_bbr.flex2 = split;
12082 log.u_bbr.flex3 = out;
12083 log.u_bbr.flex4 = line;
12084 log.u_bbr.flex5 = rack->r_must_retran;
12085 log.u_bbr.flex6 = flags;
12086 log.u_bbr.flex7 = rack->rc_has_collapsed;
12087 log.u_bbr.flex8 = dir; /*
12088 * 1 is collapsed, 0 is uncollapsed,
12089 * 2 is log of a rsm being marked, 3 is a split.
12091 if (rsm == NULL)
12092 log.u_bbr.rttProp = 0;
12093 else
12094 log.u_bbr.rttProp = (uintptr_t)rsm;
12095 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
12096 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
12097 TCP_LOG_EVENTP(rack->rc_tp, NULL,
12098 &rack->rc_inp->inp_socket->so_rcv,
12099 &rack->rc_inp->inp_socket->so_snd,
12100 TCP_RACK_LOG_COLLAPSE, 0,
12101 0, &log, false, &tv);
12105 static void
12106 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, tcp_seq th_ack, int line)
12109 * Here all we do is mark the collapsed point and set the flag.
12110 * This may happen again and again, but there is no
12111 * sense splitting our map until we know where the
12112 * peer finally lands in the collapse.
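 * Illustrative example (hypothetical sequence numbers): with
 * th_ack = 20000, an advertised snd_wnd of 10000 and snd_max = 50000,
 * last_collapse_point becomes 30000 and everything in [30000, 50000)
 * now lies beyond the peer's shrunken window.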
12114 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND);
12115 if ((rack->rc_has_collapsed == 0) ||
12116 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd)))
12117 counter_u64_add(rack_collapsed_win_seen, 1);
12118 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd;
12119 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max;
12120 rack->rc_has_collapsed = 1;
12121 rack->r_collapse_point_valid = 1;
12122 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL);
12125 static void
12126 rack_un_collapse_window(struct tcp_rack *rack, int line)
12128 struct rack_sendmap *nrsm, *rsm;
12129 int cnt = 0, split = 0;
12130 int insret __diagused;
12133 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND);
12134 rack->rc_has_collapsed = 0;
12135 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point);
12136 if (rsm == NULL) {
12137  /* Nothing to do; maybe the peer ack'ed it all */
12138 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL);
12139 return;
12141 /* Now do we need to split this one? */
12142 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) {
12143 rack_log_collapse(rack, rsm->r_start, rsm->r_end,
12144 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm);
12145 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
12146 if (nrsm == NULL) {
12147 /* We can't get a rsm, mark all? */
12148 nrsm = rsm;
12149 goto no_split;
12151 /* Clone it */
12152 split = 1;
12153 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point);
12154 #ifndef INVARIANTS
12155 (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
12156 #else
12157 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
12158 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
12159 nrsm, insret, rack, rsm);
12161 #endif
12162 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT,
12163 rack->r_ctl.last_collapse_point, __LINE__);
12164 if (rsm->r_in_tmap) {
12165 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
12166 nrsm->r_in_tmap = 1;
12169 * Set in the new RSM as the
12170 * collapsed starting point
12172 rsm = nrsm;
12175 no_split:
12176 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) {
12177 cnt++;
12178 nrsm->r_flags |= RACK_RWND_COLLAPSED;
12179 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm);
12180 cnt++;
12182 if (cnt) {
12183 counter_u64_add(rack_collapsed_win, 1);
12185 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL);
12188 static void
12189 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack,
12190 int32_t tlen, int32_t tfo_syn)
12192 if (DELAY_ACK(tp, tlen) || tfo_syn) {
12193 rack_timer_cancel(tp, rack,
12194 rack->r_ctl.rc_rcvtime, __LINE__);
12195 tp->t_flags |= TF_DELACK;
12196 } else {
12197 rack->r_wanted_output = 1;
12198 tp->t_flags |= TF_ACKNOW;
12202 static void
12203 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack)
12206 * If fast output is in progress, lets validate that
12207 * the new window did not shrink on us and make it
12208 * so fast output should end.
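 * For example (hypothetical numbers): with out = 8000,
 * left_to_send = 6000 and snd_wnd = 10000 we would overshoot by 4000,
 * so left_to_send is trimmed to 2000; if that remainder is smaller than
 * one segment, fast output is simply turned off.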
12210 if (rack->r_fast_output) {
12211 uint32_t out;
12214 * Calculate what we will send if left as is
12215 * and compare that to our send window.
12217 out = ctf_outstanding(tp);
12218 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) {
12219 /* ok we have an issue */
12220 if (out >= tp->snd_wnd) {
12221 /* Turn off fast output the window is met or collapsed */
12222 rack->r_fast_output = 0;
12223 } else {
12224 /* we have some room left */
12225 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out;
12226 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) {
12227 /* If not at least 1 full segment never mind */
12228 rack->r_fast_output = 0;
12236 * Return value of 1, the TCB is unlocked and most
12237 * likely gone, return value of 0, the TCP is still
12238 * locked.
12240 static int
12241 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
12242 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
12243 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
12246 * Update window information. Don't look at window if no ACK: TAC's
12247 * send garbage on first SYN.
12249 int32_t nsegs;
12250 int32_t tfo_syn;
12251 struct tcp_rack *rack;
12253 INP_WLOCK_ASSERT(tptoinpcb(tp));
12255 rack = (struct tcp_rack *)tp->t_fb_ptr;
12256 nsegs = max(1, m->m_pkthdr.lro_nsegs);
12257 if ((thflags & TH_ACK) &&
12258 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
12259 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
12260 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
12261 /* keep track of pure window updates */
12262 if (tlen == 0 &&
12263 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
12264 KMOD_TCPSTAT_INC(tcps_rcvwinupd);
12265 tp->snd_wnd = tiwin;
12266 rack_validate_fo_sendwin_up(tp, rack);
12267 tp->snd_wl1 = th->th_seq;
12268 tp->snd_wl2 = th->th_ack;
12269 if (tp->snd_wnd > tp->max_sndwnd)
12270 tp->max_sndwnd = tp->snd_wnd;
12271 rack->r_wanted_output = 1;
12272 } else if (thflags & TH_ACK) {
12273 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
12274 tp->snd_wnd = tiwin;
12275 rack_validate_fo_sendwin_up(tp, rack);
12276 tp->snd_wl1 = th->th_seq;
12277 tp->snd_wl2 = th->th_ack;
12280 if (tp->snd_wnd < ctf_outstanding(tp))
12281 /* The peer collapsed the window */
12282 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__);
12283 else if (rack->rc_has_collapsed)
12284 rack_un_collapse_window(rack, __LINE__);
12285 if ((rack->r_collapse_point_valid) &&
12286 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point)))
12287 rack->r_collapse_point_valid = 0;
12288 /* Was persist timer active and now we have window space? */
12289 if ((rack->rc_in_persist != 0) &&
12290 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
12291 rack->r_ctl.rc_pace_min_segs))) {
12292 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime);
12293 tp->snd_nxt = tp->snd_max;
12294 /* Make sure we output to start the timer */
12295 rack->r_wanted_output = 1;
12297 /* Do we enter persists? */
12298 if ((rack->rc_in_persist == 0) &&
12299 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
12300 TCPS_HAVEESTABLISHED(tp->t_state) &&
12301 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
12302 sbavail(&tptosocket(tp)->so_snd) &&
12303 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
12305 * Here the rwnd is less than
12306 * the pacing size, we are established,
12307 * nothing is outstanding, and there is
12308 * data to send. Enter persists.
12310 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una);
12312 if (tp->t_flags2 & TF2_DROP_AF_DATA) {
12313 m_freem(m);
12314 return (0);
12317 * don't process the URG bit, ignore them drag
12318 * along the up.
12320 tp->rcv_up = tp->rcv_nxt;
12323 * Process the segment text, merging it into the TCP sequencing
12324 * queue, and arranging for acknowledgment of receipt if necessary.
12325 * This process logically involves adjusting tp->rcv_wnd as data is
12326 * presented to the user (this happens in tcp_usrreq.c, case
12327 * PRU_RCVD). If a FIN has already been received on this connection
12328 * then we just ignore the text.
12330 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
12331 (tp->t_flags & TF_FASTOPEN));
12332 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
12333 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
12334 tcp_seq save_start = th->th_seq;
12335 tcp_seq save_rnxt = tp->rcv_nxt;
12336 int save_tlen = tlen;
12338 m_adj(m, drop_hdrlen); /* delayed header drop */
12340 * Insert segment which includes th into TCP reassembly
12341 * queue with control block tp. Set thflags to whether
12342 * reassembly now includes a segment with FIN. This handles
12343 * the common case inline (segment is the next to be
12344 * received on an established connection, and the queue is
12345 * empty), avoiding linkage into and removal from the queue
12346 * and repetition of various conversions. Set DELACK for
12347 * segments received in order, but ack immediately when
12348 * segments are out of order (so fast retransmit can work).
12350 if (th->th_seq == tp->rcv_nxt &&
12351 SEGQ_EMPTY(tp) &&
12352 (TCPS_HAVEESTABLISHED(tp->t_state) ||
12353 tfo_syn)) {
12354 #ifdef NETFLIX_SB_LIMITS
12355 u_int mcnt, appended;
12357 if (so->so_rcv.sb_shlim) {
12358 mcnt = m_memcnt(m);
12359 appended = 0;
12360 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
12361 CFO_NOSLEEP, NULL) == false) {
12362 counter_u64_add(tcp_sb_shlim_fails, 1);
12363 m_freem(m);
12364 return (0);
12367 #endif
12368 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn);
12369 tp->rcv_nxt += tlen;
12370 if (tlen &&
12371 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
12372 (tp->t_fbyte_in == 0)) {
12373 tp->t_fbyte_in = ticks;
12374 if (tp->t_fbyte_in == 0)
12375 tp->t_fbyte_in = 1;
12376 if (tp->t_fbyte_out && tp->t_fbyte_in)
12377 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
12379 thflags = tcp_get_flags(th) & TH_FIN;
12380 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
12381 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
12382 SOCK_RECVBUF_LOCK(so);
12383 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
12384 m_freem(m);
12385 } else {
12386 int32_t newsize;
12388 if (tlen > 0) {
12389 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
12390 if (newsize)
12391 if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
12392 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
12394 #ifdef NETFLIX_SB_LIMITS
12395 appended =
12396 #endif
12397 sbappendstream_locked(&so->so_rcv, m, 0);
12399 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1);
12400 /* NB: sorwakeup_locked() does an implicit unlock. */
12401 sorwakeup_locked(so);
12402 #ifdef NETFLIX_SB_LIMITS
12403 if (so->so_rcv.sb_shlim && appended != mcnt)
12404 counter_fo_release(so->so_rcv.sb_shlim,
12405 mcnt - appended);
12406 #endif
12407 } else {
12409 * XXX: Due to the header drop above "th" is
12410 * theoretically invalid by now. Fortunately
12411  * m_adj() doesn't actually free any mbufs when
12412 * trimming from the head.
12414 tcp_seq temp = save_start;
12416 thflags = tcp_reass(tp, th, &temp, &tlen, m);
12417 tp->t_flags |= TF_ACKNOW;
12418 if (tp->t_flags & TF_WAKESOR) {
12419 tp->t_flags &= ~TF_WAKESOR;
12420 /* NB: sorwakeup_locked() does an implicit unlock. */
12421 sorwakeup_locked(so);
12424 if ((tp->t_flags & TF_SACK_PERMIT) &&
12425 (save_tlen > 0) &&
12426 TCPS_HAVEESTABLISHED(tp->t_state)) {
12427 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
12429 * DSACK actually handled in the fastpath
12430 * above.
12432 tcp_update_sack_list(tp, save_start,
12433 save_start + save_tlen);
12434 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
12435 if ((tp->rcv_numsacks >= 1) &&
12436 (tp->sackblks[0].end == save_start)) {
12438 * Partial overlap, recorded at todrop
12439 * above.
12441 tcp_update_sack_list(tp,
12442 tp->sackblks[0].start,
12443 tp->sackblks[0].end);
12444 } else {
12445 tcp_update_dsack_list(tp, save_start,
12446 save_start + save_tlen);
12448 } else if (tlen >= save_tlen) {
12449 /* Update of sackblks. */
12450 tcp_update_dsack_list(tp, save_start,
12451 save_start + save_tlen);
12452 } else if (tlen > 0) {
12453 tcp_update_dsack_list(tp, save_start,
12454 save_start + tlen);
12457 } else {
12458 m_freem(m);
12459 thflags &= ~TH_FIN;
12463 * If FIN is received ACK the FIN and let the user know that the
12464 * connection is closing.
12466 if (thflags & TH_FIN) {
12467 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
12468 /* The socket upcall is handled by socantrcvmore. */
12469 socantrcvmore(so);
12471 * If connection is half-synchronized (ie NEEDSYN
12472 * flag on) then delay ACK, so it may be piggybacked
12473 * when SYN is sent. Otherwise, since we received a
12474 * FIN then no more input can be expected, send ACK
12475 * now.
12477 if (tp->t_flags & TF_NEEDSYN) {
12478 rack_timer_cancel(tp, rack,
12479 rack->r_ctl.rc_rcvtime, __LINE__);
12480 tp->t_flags |= TF_DELACK;
12481 } else {
12482 tp->t_flags |= TF_ACKNOW;
12484 tp->rcv_nxt++;
12486 switch (tp->t_state) {
12488 * In SYN_RECEIVED and ESTABLISHED STATES enter the
12489 * CLOSE_WAIT state.
12491 case TCPS_SYN_RECEIVED:
12492 tp->t_starttime = ticks;
12493 /* FALLTHROUGH */
12494 case TCPS_ESTABLISHED:
12495 rack_timer_cancel(tp, rack,
12496 rack->r_ctl.rc_rcvtime, __LINE__);
12497 tcp_state_change(tp, TCPS_CLOSE_WAIT);
12498 break;
12501 * If still in FIN_WAIT_1 STATE FIN has not been
12502 * acked so enter the CLOSING state.
12504 case TCPS_FIN_WAIT_1:
12505 rack_timer_cancel(tp, rack,
12506 rack->r_ctl.rc_rcvtime, __LINE__);
12507 tcp_state_change(tp, TCPS_CLOSING);
12508 break;
12511 * In FIN_WAIT_2 state enter the TIME_WAIT state,
12512 * starting the time-wait timer, turning off the
12513 * other standard timers.
12515 case TCPS_FIN_WAIT_2:
12516 rack_timer_cancel(tp, rack,
12517 rack->r_ctl.rc_rcvtime, __LINE__);
12518 tcp_twstart(tp);
12519 return (1);
12523 * Return any desired output.
12525 if ((tp->t_flags & TF_ACKNOW) ||
12526 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
12527 rack->r_wanted_output = 1;
12529 return (0);
12533  * Here nothing is really faster; it's just that we
12534  * have broken out the fast-data path as well, just like
12535  * the fast-ack path.
12537 static int
12538 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
12539 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
12540 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos)
12542 int32_t nsegs;
12543 int32_t newsize = 0; /* automatic sockbuf scaling */
12544 struct tcp_rack *rack;
12545 #ifdef NETFLIX_SB_LIMITS
12546 u_int mcnt, appended;
12547 #endif
12550 * If last ACK falls within this segment's sequence numbers, record
12551 * the timestamp. NOTE that the test is modified according to the
12552 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
12554 if (__predict_false(th->th_seq != tp->rcv_nxt)) {
12555 return (0);
12557 if (tiwin && tiwin != tp->snd_wnd) {
12558 return (0);
12560 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
12561 return (0);
12563 if (__predict_false((to->to_flags & TOF_TS) &&
12564 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
12565 return (0);
12567 if (__predict_false((th->th_ack != tp->snd_una))) {
12568 return (0);
12570 if (__predict_false(tlen > sbspace(&so->so_rcv))) {
12571 return (0);
12573 if ((to->to_flags & TOF_TS) != 0 &&
12574 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
12575 tp->ts_recent_age = tcp_ts_getticks();
12576 tp->ts_recent = to->to_tsval;
12578 rack = (struct tcp_rack *)tp->t_fb_ptr;
12580 * This is a pure, in-sequence data packet with nothing on the
12581 * reassembly queue and we have enough buffer space to take it.
12583 nsegs = max(1, m->m_pkthdr.lro_nsegs);
12585 #ifdef NETFLIX_SB_LIMITS
12586 if (so->so_rcv.sb_shlim) {
12587 mcnt = m_memcnt(m);
12588 appended = 0;
12589 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
12590 CFO_NOSLEEP, NULL) == false) {
12591 counter_u64_add(tcp_sb_shlim_fails, 1);
12592 m_freem(m);
12593 return (1);
12596 #endif
12597 /* Clean receiver SACK report if present */
12598 if (tp->rcv_numsacks)
12599 tcp_clean_sackreport(tp);
12600 KMOD_TCPSTAT_INC(tcps_preddat);
12601 tp->rcv_nxt += tlen;
12602 if (tlen &&
12603 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
12604 (tp->t_fbyte_in == 0)) {
12605 tp->t_fbyte_in = ticks;
12606 if (tp->t_fbyte_in == 0)
12607 tp->t_fbyte_in = 1;
12608 if (tp->t_fbyte_out && tp->t_fbyte_in)
12609 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
12612 * Pull snd_wl1 up to prevent seq wrap relative to th_seq.
12614 tp->snd_wl1 = th->th_seq;
12616 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt.
12618 tp->rcv_up = tp->rcv_nxt;
12619 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
12620 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
12621 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
12623 /* Add data to socket buffer. */
12624 SOCK_RECVBUF_LOCK(so);
12625 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
12626 m_freem(m);
12627 } else {
12629 * Set new socket buffer size. Give up when limit is
12630 * reached.
12632 if (newsize)
12633 if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
12634 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
12635 m_adj(m, drop_hdrlen); /* delayed header drop */
12636 #ifdef NETFLIX_SB_LIMITS
12637 appended =
12638 #endif
12639 sbappendstream_locked(&so->so_rcv, m, 0);
12640 ctf_calc_rwin(so, tp);
12642 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1);
12643 /* NB: sorwakeup_locked() does an implicit unlock. */
12644 sorwakeup_locked(so);
12645 #ifdef NETFLIX_SB_LIMITS
12646 if (so->so_rcv.sb_shlim && mcnt != appended)
12647 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended);
12648 #endif
12649 rack_handle_delayed_ack(tp, rack, tlen, 0);
12650 if (tp->snd_una == tp->snd_max)
12651 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
12652 return (1);
12656 * This subfunction is used to try to highly optimize the
12657 * fast path. We again allow window updates that are
12658 * in sequence to remain in the fast-path. We also add
12659 * in the __predict's to attempt to help the compiler.
12660 * Note that if we return a 0, then we can *not* process
12661 * it and the caller should push the packet into the
12662 * slow-path.
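 * A return of 1 means the ACK was fully consumed here and the caller
 * (rack_do_established()) is done with this segment.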
12664 static int
12665 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
12666 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
12667 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts)
12669 int32_t acked;
12670 int32_t nsegs;
12671 int32_t under_pacing = 0;
12672 struct tcp_rack *rack;
12674 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
12675 /* Old ack, behind (or duplicate to) the last one rcv'd */
12676 return (0);
12678 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
12679 /* Above what we have sent? */
12680 return (0);
12682 if (__predict_false(tiwin == 0)) {
12683 /* zero window */
12684 return (0);
12686 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
12687 /* We need a SYN or a FIN, unlikely.. */
12688 return (0);
12690 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
12691 /* Timestamp is behind .. old ack with seq wrap? */
12692 return (0);
12694 if (__predict_false(IN_RECOVERY(tp->t_flags))) {
12695 /* Still recovering */
12696 return (0);
12698 rack = (struct tcp_rack *)tp->t_fb_ptr;
12699 if (rack->r_ctl.rc_sacked) {
12700 /* We have sack holes on our scoreboard */
12701 return (0);
12703 /* Ok if we reach here, we can process a fast-ack */
12704 if (rack->gp_ready &&
12705 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
12706 under_pacing = 1;
12708 nsegs = max(1, m->m_pkthdr.lro_nsegs);
12709 rack_log_ack(tp, to, th, 0, 0, NULL, NULL);
12710 /* Did the window get updated? */
12711 if (tiwin != tp->snd_wnd) {
12712 tp->snd_wnd = tiwin;
12713 rack_validate_fo_sendwin_up(tp, rack);
12714 tp->snd_wl1 = th->th_seq;
12715 if (tp->snd_wnd > tp->max_sndwnd)
12716 tp->max_sndwnd = tp->snd_wnd;
12718 /* Do we exit persists? */
12719 if ((rack->rc_in_persist != 0) &&
12720 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
12721 rack->r_ctl.rc_pace_min_segs))) {
12722 rack_exit_persist(tp, rack, cts);
12724 /* Do we enter persists? */
12725 if ((rack->rc_in_persist == 0) &&
12726 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
12727 TCPS_HAVEESTABLISHED(tp->t_state) &&
12728 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
12729 sbavail(&tptosocket(tp)->so_snd) &&
12730 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
12732 * Here the rwnd is less than
12733 * the pacing size, we are established,
12734 * nothing is outstanding, and there is
12735 * data to send. Enter persists.
12737 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack);
12740 * If last ACK falls within this segment's sequence numbers, record
12741 * the timestamp. NOTE that the test is modified according to the
12742 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
12744 if ((to->to_flags & TOF_TS) != 0 &&
12745 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
12746 tp->ts_recent_age = tcp_ts_getticks();
12747 tp->ts_recent = to->to_tsval;
12750 * This is a pure ack for outstanding data.
12752 KMOD_TCPSTAT_INC(tcps_predack);
12755 * "bad retransmit" recovery.
12757 if ((tp->t_flags & TF_PREVVALID) &&
12758 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
12759 tp->t_flags &= ~TF_PREVVALID;
12760 if (tp->t_rxtshift == 1 &&
12761 (int)(ticks - tp->t_badrxtwin) < 0)
12762 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__);
12765 * Recalculate the transmit timer / rtt.
12767 * Some boxes send broken timestamp replies during the SYN+ACK
12768 * phase, ignore timestamps of 0 or we could calculate a huge RTT
12769 * and blow up the retransmit timer.
12771 acked = BYTES_THIS_ACK(tp, th);
12773 #ifdef TCP_HHOOK
12774 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
12775 hhook_run_tcp_est_in(tp, th, to);
12776 #endif
12777 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
12778 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
12779 if (acked) {
12780 struct mbuf *mfree;
12782 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0);
12783 SOCK_SENDBUF_LOCK(so);
12784 mfree = sbcut_locked(&so->so_snd, acked);
12785 tp->snd_una = th->th_ack;
12786 /* Note we want to hold the sb lock through the sendmap adjust */
12787 rack_adjust_sendmap_head(rack, &so->so_snd);
12788 /* Wake up the socket if we have room to write more */
12789 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
12790 sowwakeup_locked(so);
12791 m_freem(mfree);
12792 tp->t_rxtshift = 0;
12793 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
12794 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
12795 rack->rc_tlp_in_progress = 0;
12796 rack->r_ctl.rc_tlp_cnt_out = 0;
12798 * If it is the RXT timer we want to
12799 * stop it, so we can restart a TLP.
12801 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
12802 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
12804 #ifdef TCP_REQUEST_TRK
12805 rack_req_check_for_comp(rack, th->th_ack);
12806 #endif
12809 * Let the congestion control algorithm update congestion control
12810 * related information. This typically means increasing the
12811 * congestion window.
12813 if (tp->snd_wnd < ctf_outstanding(tp)) {
12814 /* The peer collapsed the window */
12815 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__);
12816 } else if (rack->rc_has_collapsed)
12817 rack_un_collapse_window(rack, __LINE__);
12818 if ((rack->r_collapse_point_valid) &&
12819 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point)))
12820 rack->r_collapse_point_valid = 0;
12822 * Pull snd_wl2 up to prevent seq wrap relative to th_ack.
12824 tp->snd_wl2 = th->th_ack;
12825 tp->t_dupacks = 0;
12826 m_freem(m);
12827 /* ND6_HINT(tp); *//* Some progress has been made. */
12830 * If all outstanding data are acked, stop retransmit timer,
12831 * otherwise restart timer using current (possibly backed-off)
12832 * value. If process is waiting for space, wakeup/selwakeup/signal.
12833 * If data are ready to send, let tcp_output decide between more
12834 * output or persist.
12836 if (under_pacing &&
12837 (rack->use_fixed_rate == 0) &&
12838 (rack->in_probe_rtt == 0) &&
12839 rack->rc_gp_dyn_mul &&
12840 rack->rc_always_pace) {
12841 /* Check if we are dragging bottom */
12842 rack_check_bottom_drag(tp, rack, so);
12844 if (tp->snd_una == tp->snd_max) {
12845 tp->t_flags &= ~TF_PREVVALID;
12846 rack->r_ctl.retran_during_recovery = 0;
12847 rack->rc_suspicious = 0;
12848 rack->r_ctl.dsack_byte_cnt = 0;
12849 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
12850 if (rack->r_ctl.rc_went_idle_time == 0)
12851 rack->r_ctl.rc_went_idle_time = 1;
12852 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
12853 if (sbavail(&tptosocket(tp)->so_snd) == 0)
12854 tp->t_acktime = 0;
12855 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
12857 if (acked && rack->r_fast_output)
12858 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked);
12859 if (sbavail(&so->so_snd)) {
12860 rack->r_wanted_output = 1;
12862 return (1);
12866 * Return value of 1, the TCB is unlocked and most
12867 * likely gone, return value of 0, the TCP is still
12868 * locked.
12870 static int
12871 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
12872 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
12873 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
12875 int32_t ret_val = 0;
12876 int32_t orig_tlen = tlen;
12877 int32_t todrop;
12878 int32_t ourfinisacked = 0;
12879 struct tcp_rack *rack;
12881 INP_WLOCK_ASSERT(tptoinpcb(tp));
12883 ctf_calc_rwin(so, tp);
12885 * If the state is SYN_SENT: if seg contains an ACK, but not for our
12886 * SYN, drop the input. if seg contains a RST, then drop the
12887 * connection. if seg does not contain SYN, then drop it. Otherwise
12888 * this is an acceptable SYN segment initialize tp->rcv_nxt and
12889 * tp->irs if seg contains ack then advance tp->snd_una if seg
12890 * contains an ECE and ECN support is enabled, the stream is ECN
12891 * capable. if SYN has been acked change to ESTABLISHED else
12892 * SYN_RCVD state arrange for segment to be acked (eventually)
12893 * continue processing rest of data/controls.
12895 if ((thflags & TH_ACK) &&
12896 (SEQ_LEQ(th->th_ack, tp->iss) ||
12897 SEQ_GT(th->th_ack, tp->snd_max))) {
12898 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
12899 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
12900 return (1);
12902 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
12903 TCP_PROBE5(connect__refused, NULL, tp,
12904 mtod(m, const char *), tp, th);
12905 tp = tcp_drop(tp, ECONNREFUSED);
12906 ctf_do_drop(m, tp);
12907 return (1);
12909 if (thflags & TH_RST) {
12910 ctf_do_drop(m, tp);
12911 return (1);
12913 if (!(thflags & TH_SYN)) {
12914 ctf_do_drop(m, tp);
12915 return (1);
12917 tp->irs = th->th_seq;
12918 tcp_rcvseqinit(tp);
12919 rack = (struct tcp_rack *)tp->t_fb_ptr;
12920 if (thflags & TH_ACK) {
12921 int tfo_partial = 0;
12923 KMOD_TCPSTAT_INC(tcps_connects);
12924 soisconnected(so);
12925 #ifdef MAC
12926 mac_socketpeer_set_from_mbuf(m, so);
12927 #endif
12928 /* Do window scaling on this connection? */
12929 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
12930 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
12931 tp->rcv_scale = tp->request_r_scale;
12933 tp->rcv_adv += min(tp->rcv_wnd,
12934 TCP_MAXWIN << tp->rcv_scale);
12936 * If not all the data that was sent in the TFO SYN
12937 * has been acked, resend the remainder right away.
12939 if ((tp->t_flags & TF_FASTOPEN) &&
12940 (tp->snd_una != tp->snd_max)) {
12941 /* Was it a partial ack? */
12942 if (SEQ_LT(th->th_ack, tp->snd_max))
12943 tfo_partial = 1;
12946 * If there's data, delay ACK; if there's also a FIN ACKNOW
12947 * will be turned on later.
12949 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) {
12950 rack_timer_cancel(tp, rack,
12951 rack->r_ctl.rc_rcvtime, __LINE__);
12952 tp->t_flags |= TF_DELACK;
12953 } else {
12954 rack->r_wanted_output = 1;
12955 tp->t_flags |= TF_ACKNOW;
12958 tcp_ecn_input_syn_sent(tp, thflags, iptos);
12960 if (SEQ_GT(th->th_ack, tp->snd_una)) {
12962 * We advance snd_una for the
12963 * fast open case. If th_ack is
12964 * acknowledging data beyond
12965 * snd_una we can't just call
12966 * ack-processing since the
12967 * data stream in our send-map
12968 * will start at snd_una + 1 (one
12969  * beyond the SYN). If it's just
12970 * equal we don't need to do that
12971 * and there is no send_map.
12973 tp->snd_una++;
12974 if (tfo_partial && (SEQ_GT(tp->snd_max, tp->snd_una))) {
12976 * We sent a SYN with data, and thus have a
12977 * sendmap entry with a SYN set. Lets find it
12978 * and take off the send bit and the byte and
12979 * set it up to be what we send (send it next).
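 * Illustrative example (hypothetical numbers): a TFO SYN
 * carrying 100 bytes with iss = 1000 leaves snd_max = 1101.
 * If the SYN-ACK only acks 1051, tfo_partial was set above,
 * snd_una has been advanced past the SYN, and here the first
 * sendmap entry loses RACK_HAS_SYN (its r_start is bumped by
 * one) and is queued as rc_resend so the unacked data goes
 * out again.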
12981 struct rack_sendmap *rsm;
12983 rsm = tqhash_min(rack->r_ctl.tqh);
12984 if (rsm) {
12985 if (rsm->r_flags & RACK_HAS_SYN) {
12986 rsm->r_flags &= ~RACK_HAS_SYN;
12987 rsm->r_start++;
12989 rack->r_ctl.rc_resend = rsm;
12994 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions:
12995 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1
12997 tp->t_starttime = ticks;
12998 if (tp->t_flags & TF_NEEDFIN) {
12999 tcp_state_change(tp, TCPS_FIN_WAIT_1);
13000 tp->t_flags &= ~TF_NEEDFIN;
13001 thflags &= ~TH_SYN;
13002 } else {
13003 tcp_state_change(tp, TCPS_ESTABLISHED);
13004 TCP_PROBE5(connect__established, NULL, tp,
13005 mtod(m, const char *), tp, th);
13006 rack_cc_conn_init(tp);
13008 } else {
13010 * Received initial SYN in SYN-SENT[*] state => simultaneous
13011 * open. If segment contains CC option and there is a
13012 * cached CC, apply TAO test. If it succeeds, connection is *
13013 * half-synchronized. Otherwise, do 3-way handshake:
13014 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If
13015 * there was no CC option, clear cached CC value.
13017 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN);
13018 tcp_state_change(tp, TCPS_SYN_RECEIVED);
13021 * Advance th->th_seq to correspond to first data byte. If data,
13022 * trim to stay within window, dropping FIN if necessary.
13024 th->th_seq++;
13025 if (tlen > tp->rcv_wnd) {
13026 todrop = tlen - tp->rcv_wnd;
13027 m_adj(m, -todrop);
13028 tlen = tp->rcv_wnd;
13029 thflags &= ~TH_FIN;
13030 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin);
13031 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
13033 tp->snd_wl1 = th->th_seq - 1;
13034 tp->rcv_up = th->th_seq;
13036 * Client side of transaction: already sent SYN and data. If the
13037 * remote host used T/TCP to validate the SYN, our data will be
13038 * ACK'd; if so, enter normal data segment processing in the middle
13039 * of step 5, ack processing. Otherwise, goto step 6.
13041 if (thflags & TH_ACK) {
13042 /* For syn-sent we need to possibly update the rtt */
13043 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
13044 uint32_t t, mcts;
13046 mcts = tcp_ts_getticks();
13047 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
13048 if (!tp->t_rttlow || tp->t_rttlow > t)
13049 tp->t_rttlow = t;
13050 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4);
13051 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
13052 tcp_rack_xmit_timer_commit(rack, tp);
13054 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen))
13055 return (ret_val);
13056 /* We may have changed to FIN_WAIT_1 above */
13057 if (tp->t_state == TCPS_FIN_WAIT_1) {
13059 * In FIN_WAIT_1 STATE in addition to the processing
13060 * for the ESTABLISHED state if our FIN is now
13061 * acknowledged then enter FIN_WAIT_2.
13063 if (ourfinisacked) {
13065 * If we can't receive any more data, then
13066 * closing user can proceed. Starting the
13067 * timer is contrary to the specification,
13068 * but if we don't get a FIN we'll hang
13069 * forever.
13071 * XXXjl: we should release the tp also, and
13072 * use a compressed state.
13074 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13075 soisdisconnected(so);
13076 tcp_timer_activate(tp, TT_2MSL,
13077 (tcp_fast_finwait2_recycle ?
13078 tcp_finwait2_timeout :
13079 TP_MAXIDLE(tp)));
13081 tcp_state_change(tp, TCPS_FIN_WAIT_2);
13085 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13086 tiwin, thflags, nxt_pkt));
13090 * Return value of 1, the TCB is unlocked and most
13091 * likely gone, return value of 0, the TCP is still
13092 * locked.
13094 static int
13095 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
13096 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
13097 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
13099 struct tcp_rack *rack;
13100 int32_t orig_tlen = tlen;
13101 int32_t ret_val = 0;
13102 int32_t ourfinisacked = 0;
13104 rack = (struct tcp_rack *)tp->t_fb_ptr;
13105 ctf_calc_rwin(so, tp);
13106 if ((thflags & TH_RST) ||
13107 (tp->t_fin_is_rst && (thflags & TH_FIN)))
13108 return (ctf_process_rst(m, th, so, tp));
13109 if ((thflags & TH_ACK) &&
13110 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
13111 SEQ_GT(th->th_ack, tp->snd_max))) {
13112 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
13113 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13114 return (1);
13116 if (tp->t_flags & TF_FASTOPEN) {
13118 * When a TFO connection is in SYN_RECEIVED, the
13119 * only valid packets are the initial SYN, a
13120 * retransmit/copy of the initial SYN (possibly with
13121 * a subset of the original data), a valid ACK, a
13122 * FIN, or a RST.
13124 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
13125 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
13126 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13127 return (1);
13128 } else if (thflags & TH_SYN) {
13129 /* non-initial SYN is ignored */
13130 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
13131 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
13132 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
13133 ctf_do_drop(m, NULL);
13134 return (0);
13136 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) {
13137 ctf_do_drop(m, NULL);
13138 return (0);
13143 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
13144 * it's less than ts_recent, drop it.
13146 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
13147 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
13148 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
13149 return (ret_val);
13152 * In the SYN-RECEIVED state, validate that the packet belongs to
13153 * this connection before trimming the data to fit the receive
13154 * window. Check the sequence number versus IRS since we know the
13155 * sequence numbers haven't wrapped. This is a partial fix for the
13156 * "LAND" DoS attack.
13158 if (SEQ_LT(th->th_seq, tp->irs)) {
13159 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
13160 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13161 return (1);
13163 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
13164 return (ret_val);
13167 * If last ACK falls within this segment's sequence numbers, record
13168 * its timestamp. NOTE: 1) That the test incorporates suggestions
13169 * from the latest proposal of the tcplw@cray.com list (Braden
13170 * 1993/04/26). 2) That updating only on newer timestamps interferes
13171 * with our earlier PAWS tests, so this check should be solely
13172 * predicated on the sequence space of this segment. 3) That we
13173 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
13174 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
13175 * SEG.Len, This modified check allows us to overcome RFC1323's
13176 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
13177 * p.869. In such cases, we can still calculate the RTT correctly
13178 * when RCV.NXT == Last.ACK.Sent.
13180 if ((to->to_flags & TOF_TS) != 0 &&
13181 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
13182 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
13183 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
13184 tp->ts_recent_age = tcp_ts_getticks();
13185 tp->ts_recent = to->to_tsval;
13187 tp->snd_wnd = tiwin;
13188 rack_validate_fo_sendwin_up(tp, rack);
13190 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
13191 * is on (half-synchronized state), then queue data for later
13192 * processing; else drop segment and return.
13194 if ((thflags & TH_ACK) == 0) {
13195 if (tp->t_flags & TF_FASTOPEN) {
13196 rack_cc_conn_init(tp);
13198 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13199 tiwin, thflags, nxt_pkt));
13201 KMOD_TCPSTAT_INC(tcps_connects);
13202 if (tp->t_flags & TF_SONOTCONN) {
13203 tp->t_flags &= ~TF_SONOTCONN;
13204 soisconnected(so);
13206 /* Do window scaling? */
13207 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
13208 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
13209 tp->rcv_scale = tp->request_r_scale;
13212 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* ->
13213 * FIN-WAIT-1
13215 tp->t_starttime = ticks;
13216 if ((tp->t_flags & TF_FASTOPEN) && tp->t_tfo_pending) {
13217 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
13218 tp->t_tfo_pending = NULL;
13220 if (tp->t_flags & TF_NEEDFIN) {
13221 tcp_state_change(tp, TCPS_FIN_WAIT_1);
13222 tp->t_flags &= ~TF_NEEDFIN;
13223 } else {
13224 tcp_state_change(tp, TCPS_ESTABLISHED);
13225 TCP_PROBE5(accept__established, NULL, tp,
13226 mtod(m, const char *), tp, th);
13228 * TFO connections call cc_conn_init() during SYN
13229 * processing. Calling it again here for such connections
13230 * is not harmless as it would undo the snd_cwnd reduction
13231 * that occurs when a TFO SYN|ACK is retransmitted.
13233 if (!(tp->t_flags & TF_FASTOPEN))
13234 rack_cc_conn_init(tp);
13237 * Account for the ACK of our SYN prior to
13238 * regular ACK processing below, except for
13239 * simultaneous SYN, which is handled later.
13241 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
13242 tp->snd_una++;
13244 * If segment contains data or ACK, will call tcp_reass() later; if
13245 * not, do so now to pass queued data to user.
13247 if (tlen == 0 && (thflags & TH_FIN) == 0) {
13248 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
13249 (struct mbuf *)0);
13250 if (tp->t_flags & TF_WAKESOR) {
13251 tp->t_flags &= ~TF_WAKESOR;
13252 /* NB: sorwakeup_locked() does an implicit unlock. */
13253 sorwakeup_locked(so);
13256 tp->snd_wl1 = th->th_seq - 1;
13257 /* For syn-recv we need to possibly update the rtt */
13258 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
13259 uint32_t t, mcts;
13261 mcts = tcp_ts_getticks();
13262 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
13263 if (!tp->t_rttlow || tp->t_rttlow > t)
13264 tp->t_rttlow = t;
13265 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5);
13266 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
13267 tcp_rack_xmit_timer_commit(rack, tp);
13269 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) {
13270 return (ret_val);
13272 if (tp->t_state == TCPS_FIN_WAIT_1) {
13273  /* We could have gone to FIN_WAIT_1 (or EST) above */
13275 * In FIN_WAIT_1 STATE in addition to the processing for the
13276 * ESTABLISHED state if our FIN is now acknowledged then
13277 * enter FIN_WAIT_2.
13279 if (ourfinisacked) {
13281 * If we can't receive any more data, then closing
13282 * user can proceed. Starting the timer is contrary
13283 * to the specification, but if we don't get a FIN
13284 * we'll hang forever.
13286 * XXXjl: we should release the tp also, and use a
13287 * compressed state.
13289 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13290 soisdisconnected(so);
13291 tcp_timer_activate(tp, TT_2MSL,
13292 (tcp_fast_finwait2_recycle ?
13293 tcp_finwait2_timeout :
13294 TP_MAXIDLE(tp)));
13296 tcp_state_change(tp, TCPS_FIN_WAIT_2);
13299 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13300 tiwin, thflags, nxt_pkt));
13304 * Return value of 1, the TCB is unlocked and most
13305 * likely gone, return value of 0, the TCP is still
13306 * locked.
13308 static int
13309 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
13310 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
13311 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
13313 int32_t ret_val = 0;
13314 int32_t orig_tlen = tlen;
13315 struct tcp_rack *rack;
13318 * Header prediction: check for the two common cases of a
13319 * uni-directional data xfer. If the packet has no control flags,
13320 * is in-sequence, the window didn't change and we're not
13321 * retransmitting, it's a candidate. If the length is zero and the
13322 * ack moved forward, we're the sender side of the xfer. Just free
13323 * the data acked & wake any higher level process that was blocked
13324 * waiting for space. If the length is non-zero and the ack didn't
13325 * move, we're the receiver side. If we're getting packets in-order
13326 * (the reassembly queue is empty), add the data to the socket
13327 * buffer and note that we need a delayed ack. Make sure that the
13328 * hidden state-flags are also off. Since we check for
13329 * TCPS_ESTABLISHED first, it can only be TF_NEEDSYN.
13331 rack = (struct tcp_rack *)tp->t_fb_ptr;
13332 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
13333 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) &&
13334 __predict_true(SEGQ_EMPTY(tp)) &&
13335 __predict_true(th->th_seq == tp->rcv_nxt)) {
13336 if (tlen == 0) {
13337 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
13338 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
13339 return (0);
13341 } else {
13342 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
13343 tiwin, nxt_pkt, iptos)) {
13344 return (0);
13348 ctf_calc_rwin(so, tp);
13350 if ((thflags & TH_RST) ||
13351 (tp->t_fin_is_rst && (thflags & TH_FIN)))
13352 return (ctf_process_rst(m, th, so, tp));
13355 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
13356 * synchronized state.
13358 if (thflags & TH_SYN) {
13359 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
13360 return (ret_val);
13363 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
13364 * it's less than ts_recent, drop it.
13366 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
13367 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
13368 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
13369 return (ret_val);
13371 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
13372 return (ret_val);
13375 * If last ACK falls within this segment's sequence numbers, record
13376 * its timestamp. NOTE: 1) That the test incorporates suggestions
13377 * from the latest proposal of the tcplw@cray.com list (Braden
13378 * 1993/04/26). 2) That updating only on newer timestamps interferes
13379 * with our earlier PAWS tests, so this check should be solely
13380 * predicated on the sequence space of this segment. 3) That we
13381 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
13382 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
13383 * SEG.Len. This modified check allows us to overcome RFC1323's
13384 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
13385 * p.869. In such cases, we can still calculate the RTT correctly
13386 * when RCV.NXT == Last.ACK.Sent.
13388 if ((to->to_flags & TOF_TS) != 0 &&
13389 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
13390 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
13391 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
13392 tp->ts_recent_age = tcp_ts_getticks();
13393 tp->ts_recent = to->to_tsval;
13396 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
13397 * is on (half-synchronized state), then queue data for later
13398 * processing; else drop segment and return.
13400 if ((thflags & TH_ACK) == 0) {
13401 if (tp->t_flags & TF_NEEDSYN) {
13402 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13403 tiwin, thflags, nxt_pkt));
13405 } else if (tp->t_flags & TF_ACKNOW) {
13406 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
13407 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
13408 return (ret_val);
13409 } else {
13410 ctf_do_drop(m, NULL);
13411 return (0);
13415 * Ack processing.
13417 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) {
13418 return (ret_val);
13420 if (sbavail(&so->so_snd)) {
13421 if (ctf_progress_timeout_check(tp, true)) {
13422 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
13423 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13424 return (1);
13427 /* State changes only happen in rack_process_data() */
13428 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13429 tiwin, thflags, nxt_pkt));
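/*
 * Illustrative sketch (hypothetical helper, not used by the stack):
 * the timestamp-recording rule repeated in the comments above reduces
 * to "record ts_recent when the last ACK we sent falls inside this
 * segment's sequence space", using the relaxed upper bound
 * Last.ACK.Sent <= SEG.SEQ + SEG.Len (+1 when a SYN or FIN occupies
 * sequence space).  A minimal restatement of that predicate with the
 * SEQ_LEQ() macro already used throughout this file:
 */
static inline int
rack_sketch_should_record_ts(tcp_seq last_ack_sent, tcp_seq seg_seq,
    uint32_t seg_len, int syn_or_fin)
{
	/* SEG.SEQ <= Last.ACK.Sent <= SEG.SEQ + SEG.Len (+1 for SYN|FIN) */
	return (SEQ_LEQ(seg_seq, last_ack_sent) &&
	    SEQ_LEQ(last_ack_sent, seg_seq + seg_len + (syn_or_fin ? 1 : 0)));
}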
13433 * A return value of 1 means the TCB is unlocked and most
13434 * likely gone; a return value of 0 means the TCP is still
13435 * locked.
13437 static int
13438 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
13439 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
13440 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
13442 int32_t ret_val = 0;
13443 int32_t orig_tlen = tlen;
13445 ctf_calc_rwin(so, tp);
13446 if ((thflags & TH_RST) ||
13447 (tp->t_fin_is_rst && (thflags & TH_FIN)))
13448 return (ctf_process_rst(m, th, so, tp));
13450 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
13451 * synchronized state.
13453 if (thflags & TH_SYN) {
13454 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
13455 return (ret_val);
13458 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
13459 * it's less than ts_recent, drop it.
13461 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
13462 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
13463 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
13464 return (ret_val);
13466 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
13467 return (ret_val);
13470 * If last ACK falls within this segment's sequence numbers, record
13471 * its timestamp. NOTE: 1) That the test incorporates suggestions
13472 * from the latest proposal of the tcplw@cray.com list (Braden
13473 * 1993/04/26). 2) That updating only on newer timestamps interferes
13474 * with our earlier PAWS tests, so this check should be solely
13475 * predicated on the sequence space of this segment. 3) That we
13476 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
13477 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
13478 * SEG.Len. This modified check allows us to overcome RFC1323's
13479 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
13480 * p.869. In such cases, we can still calculate the RTT correctly
13481 * when RCV.NXT == Last.ACK.Sent.
13483 if ((to->to_flags & TOF_TS) != 0 &&
13484 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
13485 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
13486 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
13487 tp->ts_recent_age = tcp_ts_getticks();
13488 tp->ts_recent = to->to_tsval;
13491 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
13492 * is on (half-synchronized state), then queue data for later
13493 * processing; else drop segment and return.
13495 if ((thflags & TH_ACK) == 0) {
13496 if (tp->t_flags & TF_NEEDSYN) {
13497 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13498 tiwin, thflags, nxt_pkt));
13500 } else if (tp->t_flags & TF_ACKNOW) {
13501 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
13502 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
13503 return (ret_val);
13504 } else {
13505 ctf_do_drop(m, NULL);
13506 return (0);
13510 * Ack processing.
13512 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) {
13513 return (ret_val);
13515 if (sbavail(&so->so_snd)) {
13516 if (ctf_progress_timeout_check(tp, true)) {
13517 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
13518 tp, tick, PROGRESS_DROP, __LINE__);
13519 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13520 return (1);
13523 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13524 tiwin, thflags, nxt_pkt));
13527 static int
13528 rack_check_data_after_close(struct mbuf *m,
13529 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so)
13531 struct tcp_rack *rack;
13533 rack = (struct tcp_rack *)tp->t_fb_ptr;
13534 if (rack->rc_allow_data_af_clo == 0) {
13535 close_now:
13536 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
13537 /* tcp_close will kill the inp pre-log the Reset */
13538 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
13539 tp = tcp_close(tp);
13540 KMOD_TCPSTAT_INC(tcps_rcvafterclose);
13541 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
13542 return (1);
13544 if (sbavail(&so->so_snd) == 0)
13545 goto close_now;
13546 /* Ok we allow data that is ignored and a followup reset */
13547 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
13548 tp->rcv_nxt = th->th_seq + *tlen;
13549 tp->t_flags2 |= TF2_DROP_AF_DATA;
13550 rack->r_wanted_output = 1;
13551 *tlen = 0;
13552 return (0);
13556 * A return value of 1 means the TCB is unlocked and most
13557 * likely gone; a return value of 0 means the TCP is still
13558 * locked.
13560 static int
13561 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
13562 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
13563 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
13565 int32_t ret_val = 0;
13566 int32_t orig_tlen = tlen;
13567 int32_t ourfinisacked = 0;
13569 ctf_calc_rwin(so, tp);
13571 if ((thflags & TH_RST) ||
13572 (tp->t_fin_is_rst && (thflags & TH_FIN)))
13573 return (ctf_process_rst(m, th, so, tp));
13575 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
13576 * synchronized state.
13578 if (thflags & TH_SYN) {
13579 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
13580 return (ret_val);
13583 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
13584 * it's less than ts_recent, drop it.
13586 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
13587 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
13588 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
13589 return (ret_val);
13591 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
13592 return (ret_val);
13595 * If new data are received on a connection after the user processes
13596 * are gone, then RST the other end.
13598 if ((tp->t_flags & TF_CLOSED) && tlen &&
13599 rack_check_data_after_close(m, tp, &tlen, th, so))
13600 return (1);
13602 * If last ACK falls within this segment's sequence numbers, record
13603 * its timestamp. NOTE: 1) That the test incorporates suggestions
13604 * from the latest proposal of the tcplw@cray.com list (Braden
13605 * 1993/04/26). 2) That updating only on newer timestamps interferes
13606 * with our earlier PAWS tests, so this check should be solely
13607 * predicated on the sequence space of this segment. 3) That we
13608 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
13609 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
13610 * SEG.Len. This modified check allows us to overcome RFC1323's
13611 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
13612 * p.869. In such cases, we can still calculate the RTT correctly
13613 * when RCV.NXT == Last.ACK.Sent.
13615 if ((to->to_flags & TOF_TS) != 0 &&
13616 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
13617 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
13618 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
13619 tp->ts_recent_age = tcp_ts_getticks();
13620 tp->ts_recent = to->to_tsval;
13623 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
13624 * is on (half-synchronized state), then queue data for later
13625 * processing; else drop segment and return.
13627 if ((thflags & TH_ACK) == 0) {
13628 if (tp->t_flags & TF_NEEDSYN) {
13629 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13630 tiwin, thflags, nxt_pkt));
13631 } else if (tp->t_flags & TF_ACKNOW) {
13632 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
13633 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
13634 return (ret_val);
13635 } else {
13636 ctf_do_drop(m, NULL);
13637 return (0);
13641 * Ack processing.
13643 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) {
13644 return (ret_val);
13646 if (ourfinisacked) {
13648 * If we can't receive any more data, then closing user can
13649 * proceed. Starting the timer is contrary to the
13650 * specification, but if we don't get a FIN we'll hang
13651 * forever.
13653 * XXXjl: we should release the tp also, and use a
13654 * compressed state.
13656 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13657 soisdisconnected(so);
13658 tcp_timer_activate(tp, TT_2MSL,
13659 (tcp_fast_finwait2_recycle ?
13660 tcp_finwait2_timeout :
13661 TP_MAXIDLE(tp)));
13663 tcp_state_change(tp, TCPS_FIN_WAIT_2);
13665 if (sbavail(&so->so_snd)) {
13666 if (ctf_progress_timeout_check(tp, true)) {
13667 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
13668 tp, tick, PROGRESS_DROP, __LINE__);
13669 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13670 return (1);
13673 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13674 tiwin, thflags, nxt_pkt));
13678 * A return value of 1 means the TCB is unlocked and most
13679 * likely gone; a return value of 0 means the TCP is still
13680 * locked.
13682 static int
13683 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
13684 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
13685 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
13687 int32_t ret_val = 0;
13688 int32_t orig_tlen = tlen;
13689 int32_t ourfinisacked = 0;
13691 ctf_calc_rwin(so, tp);
13693 if ((thflags & TH_RST) ||
13694 (tp->t_fin_is_rst && (thflags & TH_FIN)))
13695 return (ctf_process_rst(m, th, so, tp));
13697 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
13698 * synchronized state.
13700 if (thflags & TH_SYN) {
13701 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
13702 return (ret_val);
13705 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
13706 * it's less than ts_recent, drop it.
13708 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
13709 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
13710 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
13711 return (ret_val);
13713 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
13714 return (ret_val);
13717 * If last ACK falls within this segment's sequence numbers, record
13718 * its timestamp. NOTE: 1) That the test incorporates suggestions
13719 * from the latest proposal of the tcplw@cray.com list (Braden
13720 * 1993/04/26). 2) That updating only on newer timestamps interferes
13721 * with our earlier PAWS tests, so this check should be solely
13722 * predicated on the sequence space of this segment. 3) That we
13723 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
13724 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
13725 * SEG.Len. This modified check allows us to overcome RFC1323's
13726 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
13727 * p.869. In such cases, we can still calculate the RTT correctly
13728 * when RCV.NXT == Last.ACK.Sent.
13730 if ((to->to_flags & TOF_TS) != 0 &&
13731 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
13732 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
13733 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
13734 tp->ts_recent_age = tcp_ts_getticks();
13735 tp->ts_recent = to->to_tsval;
13738 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
13739 * is on (half-synchronized state), then queue data for later
13740 * processing; else drop segment and return.
13742 if ((thflags & TH_ACK) == 0) {
13743 if (tp->t_flags & TF_NEEDSYN) {
13744 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13745 tiwin, thflags, nxt_pkt));
13746 } else if (tp->t_flags & TF_ACKNOW) {
13747 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
13748 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
13749 return (ret_val);
13750 } else {
13751 ctf_do_drop(m, NULL);
13752 return (0);
13756 * Ack processing.
13758 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) {
13759 return (ret_val);
13761 if (ourfinisacked) {
13762 tcp_twstart(tp);
13763 m_freem(m);
13764 return (1);
13766 if (sbavail(&so->so_snd)) {
13767 if (ctf_progress_timeout_check(tp, true)) {
13768 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
13769 tp, tick, PROGRESS_DROP, __LINE__);
13770 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13771 return (1);
13774 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13775 tiwin, thflags, nxt_pkt));
13779 * A return value of 1 means the TCB is unlocked and most
13780 * likely gone; a return value of 0 means the TCP is still
13781 * locked.
13783 static int
13784 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
13785 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
13786 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
13788 int32_t ret_val = 0;
13789 int32_t orig_tlen;
13790 int32_t ourfinisacked = 0;
13792 ctf_calc_rwin(so, tp);
13794 if ((thflags & TH_RST) ||
13795 (tp->t_fin_is_rst && (thflags & TH_FIN)))
13796 return (ctf_process_rst(m, th, so, tp));
13798 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
13799 * synchronized state.
13801 if (thflags & TH_SYN) {
13802 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
13803 return (ret_val);
13806 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
13807 * it's less than ts_recent, drop it.
13809 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
13810 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
13811 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
13812 return (ret_val);
13814 orig_tlen = tlen;
13815 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
13816 return (ret_val);
13819 * If last ACK falls within this segment's sequence numbers, record
13820 * its timestamp. NOTE: 1) That the test incorporates suggestions
13821 * from the latest proposal of the tcplw@cray.com list (Braden
13822 * 1993/04/26). 2) That updating only on newer timestamps interferes
13823 * with our earlier PAWS tests, so this check should be solely
13824 * predicated on the sequence space of this segment. 3) That we
13825 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
13826 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
13827 * SEG.Len. This modified check allows us to overcome RFC1323's
13828 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
13829 * p.869. In such cases, we can still calculate the RTT correctly
13830 * when RCV.NXT == Last.ACK.Sent.
13832 if ((to->to_flags & TOF_TS) != 0 &&
13833 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
13834 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
13835 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
13836 tp->ts_recent_age = tcp_ts_getticks();
13837 tp->ts_recent = to->to_tsval;
13840 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
13841 * is on (half-synchronized state), then queue data for later
13842 * processing; else drop segment and return.
13844 if ((thflags & TH_ACK) == 0) {
13845 if (tp->t_flags & TF_NEEDSYN) {
13846 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13847 tiwin, thflags, nxt_pkt));
13848 } else if (tp->t_flags & TF_ACKNOW) {
13849 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
13850 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
13851 return (ret_val);
13852 } else {
13853 ctf_do_drop(m, NULL);
13854 return (0);
13858 * Ack processing (TCPS_LAST_ACK).
13860 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) {
13861 return (ret_val);
13863 if (ourfinisacked) {
13864 tp = tcp_close(tp);
13865 ctf_do_drop(m, tp);
13866 return (1);
13868 if (sbavail(&so->so_snd)) {
13869 if (ctf_progress_timeout_check(tp, true)) {
13870 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
13871 tp, tick, PROGRESS_DROP, __LINE__);
13872 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13873 return (1);
13876 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13877 tiwin, thflags, nxt_pkt));
13881 * A return value of 1 means the TCB is unlocked and most
13882 * likely gone; a return value of 0 means the TCP is still
13883 * locked.
13885 static int
13886 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
13887 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
13888 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
13890 int32_t ret_val = 0;
13891 int32_t orig_tlen = tlen;
13892 int32_t ourfinisacked = 0;
13894 ctf_calc_rwin(so, tp);
13896 /* Reset receive buffer auto scaling when not in bulk receive mode. */
13897 if ((thflags & TH_RST) ||
13898 (tp->t_fin_is_rst && (thflags & TH_FIN)))
13899 return (ctf_process_rst(m, th, so, tp));
13901 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
13902 * synchronized state.
13904 if (thflags & TH_SYN) {
13905 ctf_challenge_ack(m, th, tp, iptos, &ret_val);
13906 return (ret_val);
13909 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
13910 * it's less than ts_recent, drop it.
13912 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
13913 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
13914 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
13915 return (ret_val);
13917 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
13918 return (ret_val);
13921 * If new data are received on a connection after the user processes
13922 * are gone, then RST the other end.
13924 if ((tp->t_flags & TF_CLOSED) && tlen &&
13925 rack_check_data_after_close(m, tp, &tlen, th, so))
13926 return (1);
13928 * If last ACK falls within this segment's sequence numbers, record
13929 * its timestamp. NOTE: 1) That the test incorporates suggestions
13930 * from the latest proposal of the tcplw@cray.com list (Braden
13931 * 1993/04/26). 2) That updating only on newer timestamps interferes
13932 * with our earlier PAWS tests, so this check should be solely
13933 * predicated on the sequence space of this segment. 3) That we
13934 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
13935 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
13936 * SEG.Len. This modified check allows us to overcome RFC1323's
13937 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
13938 * p.869. In such cases, we can still calculate the RTT correctly
13939 * when RCV.NXT == Last.ACK.Sent.
13941 if ((to->to_flags & TOF_TS) != 0 &&
13942 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
13943 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
13944 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
13945 tp->ts_recent_age = tcp_ts_getticks();
13946 tp->ts_recent = to->to_tsval;
13949 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
13950 * is on (half-synchronized state), then queue data for later
13951 * processing; else drop segment and return.
13953 if ((thflags & TH_ACK) == 0) {
13954 if (tp->t_flags & TF_NEEDSYN) {
13955 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13956 tiwin, thflags, nxt_pkt));
13957 } else if (tp->t_flags & TF_ACKNOW) {
13958 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
13959 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
13960 return (ret_val);
13961 } else {
13962 ctf_do_drop(m, NULL);
13963 return (0);
13967 * Ack processing.
13969 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) {
13970 return (ret_val);
13972 if (sbavail(&so->so_snd)) {
13973 if (ctf_progress_timeout_check(tp, true)) {
13974 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
13975 tp, tick, PROGRESS_DROP, __LINE__);
13976 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13977 return (1);
13980 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
13981 tiwin, thflags, nxt_pkt));
13984 static void inline
13985 rack_clear_rate_sample(struct tcp_rack *rack)
13987 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
13988 rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
13989 rack->r_ctl.rack_rs.rs_rtt_tot = 0;
13992 static void
13993 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override)
13995 uint64_t bw_est, rate_wanted;
13996 int chged = 0;
13997 uint32_t user_max, orig_min, orig_max;
13999 #ifdef TCP_REQUEST_TRK
14000 if (rack->rc_hybrid_mode &&
14001 (rack->r_ctl.rc_pace_max_segs != 0) &&
14002 (rack_hybrid_allow_set_maxseg == 1) &&
14003 (rack->r_ctl.rc_last_sft != NULL)) {
14004 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS;
14005 return;
14007 #endif
14008 orig_min = rack->r_ctl.rc_pace_min_segs;
14009 orig_max = rack->r_ctl.rc_pace_max_segs;
14010 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs;
14011 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs)
14012 chged = 1;
14013 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp);
14014 if (rack->use_fixed_rate || rack->rc_force_max_seg) {
14015 if (user_max != rack->r_ctl.rc_pace_max_segs)
14016 chged = 1;
14018 if (rack->rc_force_max_seg) {
14019 rack->r_ctl.rc_pace_max_segs = user_max;
14020 } else if (rack->use_fixed_rate) {
14021 bw_est = rack_get_bw(rack);
14022 if ((rack->r_ctl.crte == NULL) ||
14023 (bw_est != rack->r_ctl.crte->rate)) {
14024 rack->r_ctl.rc_pace_max_segs = user_max;
14025 } else {
14026 /* We are pacing right at the hardware rate */
14027 uint32_t segsiz, pace_one;
14029 if (rack_pace_one_seg ||
14030 (rack->r_ctl.rc_user_set_min_segs == 1))
14031 pace_one = 1;
14032 else
14033 pace_one = 0;
14034 segsiz = min(ctf_fixed_maxseg(tp),
14035 rack->r_ctl.rc_pace_min_segs);
14036 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(
14037 tp, bw_est, segsiz, pace_one,
14038 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor);
14040 } else if (rack->rc_always_pace) {
14041 if (rack->r_ctl.gp_bw ||
14042 rack->r_ctl.init_rate) {
14043 /* We have a rate of some sort set */
14044 uint32_t orig;
14046 bw_est = rack_get_bw(rack);
14047 orig = rack->r_ctl.rc_pace_max_segs;
14048 if (fill_override)
14049 rate_wanted = *fill_override;
14050 else
14051 rate_wanted = rack_get_gp_est(rack);
14052 if (rate_wanted) {
14053 /* We have something */
14054 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack,
14055 rate_wanted,
14056 ctf_fixed_maxseg(rack->rc_tp));
14057 } else
14058 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs;
14059 if (orig != rack->r_ctl.rc_pace_max_segs)
14060 chged = 1;
14061 } else if ((rack->r_ctl.gp_bw == 0) &&
14062 (rack->r_ctl.rc_pace_max_segs == 0)) {
14064 * If we have nothing, limit us to bursting
14065 * out IW-sized pieces.
14067 chged = 1;
14068 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack);
14071 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) {
14072 chged = 1;
14073 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES;
14075 if (chged)
14076 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2);
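/*
 * Illustrative sketch (hypothetical helper, not part of the stack):
 * rack_set_pace_segments() above works in bytes.  The user cap is
 * simply the fixed maxseg times the requested segment count, and
 * whatever candidate finally wins is bounded by PACE_MAX_IP_BYTES,
 * exactly as the clamp at the end of that function does.
 */
static inline uint32_t
rack_sketch_user_pace_max(uint32_t fixed_maxseg, uint32_t user_set_segs)
{
	uint32_t user_max;

	user_max = fixed_maxseg * user_set_segs;
	if (user_max > PACE_MAX_IP_BYTES)
		user_max = PACE_MAX_IP_BYTES;
	return (user_max);
}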
14080 static void
14081 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack, int32_t flags)
14083 #ifdef INET6
14084 struct ip6_hdr *ip6 = NULL;
14085 #endif
14086 #ifdef INET
14087 struct ip *ip = NULL;
14088 #endif
14089 struct udphdr *udp = NULL;
14091 /* Ok, let's fill in the fast block; it can only be used with no IP options! */
14092 #ifdef INET6
14093 if (rack->r_is_v6) {
14094 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
14095 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
14096 if (tp->t_port) {
14097 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
14098 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
14099 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
14100 udp->uh_dport = tp->t_port;
14101 rack->r_ctl.fsb.udp = udp;
14102 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
14103 } else
14105 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1);
14106 rack->r_ctl.fsb.udp = NULL;
14108 tcpip_fillheaders(rack->rc_inp,
14109 tp->t_port,
14110 ip6, rack->r_ctl.fsb.th);
14111 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL);
14112 } else
14113 #endif /* INET6 */
14114 #ifdef INET
14116 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr);
14117 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
14118 if (tp->t_port) {
14119 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
14120 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
14121 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
14122 udp->uh_dport = tp->t_port;
14123 rack->r_ctl.fsb.udp = udp;
14124 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
14125 } else
14127 rack->r_ctl.fsb.udp = NULL;
14128 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1);
14130 tcpip_fillheaders(rack->rc_inp,
14131 tp->t_port,
14132 ip, rack->r_ctl.fsb.th);
14133 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl;
14135 #endif
14136 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0),
14137 (long)TCP_MAXWIN << tp->rcv_scale);
14138 rack->r_fsb_inited = 1;
14141 static int
14142 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack)
14145 * Allocate the larger of the two spaces: V6 if available, else just
14146 * V4, and include a udphdr (overbook).
14148 #ifdef INET6
14149 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr);
14150 #else
14151 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr);
14152 #endif
14153 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len,
14154 M_TCPFSB, M_NOWAIT|M_ZERO);
14155 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) {
14156 return (ENOMEM);
14158 rack->r_fsb_inited = 0;
14159 return (0);
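/*
 * Illustrative sketch (hypothetical helper, not used by the stack) of
 * the "overbook" sizing above: rack_init_fsb() always reserves room
 * for the worst-case prefix [ ip6_hdr | udphdr | tcphdr ] (or the v4
 * equivalent), while rack_init_fsb_block() later lays out only the
 * headers the connection actually needs.
 */
static inline size_t
rack_sketch_fsb_hdr_len(int is_v6, int udp_tunneled)
{
	size_t len;

	len = is_v6 ? sizeof(struct ip6_hdr) : sizeof(struct ip);
	len += sizeof(struct tcphdr);
	if (udp_tunneled)
		len += sizeof(struct udphdr);
	return (len);
}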
14162 static void
14163 rack_log_hystart_event(struct tcp_rack *rack, uint32_t high_seq, uint8_t mod)
14166 * Types of logs (mod value)
14167 * 20 - Initial round setup
14168 * 21 - Rack declares a new round.
14170 struct tcpcb *tp;
14172 tp = rack->rc_tp;
14173 if (tcp_bblogging_on(tp)) {
14174 union tcp_log_stackspecific log;
14175 struct timeval tv;
14177 memset(&log, 0, sizeof(log));
14178 log.u_bbr.flex1 = rack->r_ctl.current_round;
14179 log.u_bbr.flex2 = rack->r_ctl.roundends;
14180 log.u_bbr.flex3 = high_seq;
14181 log.u_bbr.flex4 = tp->snd_max;
14182 log.u_bbr.flex8 = mod;
14183 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
14184 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes;
14185 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes;
14186 TCP_LOG_EVENTP(tp, NULL,
14187 &tptosocket(tp)->so_rcv,
14188 &tptosocket(tp)->so_snd,
14189 TCP_HYSTART, 0,
14190 0, &log, false, &tv);
14194 static void
14195 rack_deferred_init(struct tcpcb *tp, struct tcp_rack *rack)
14197 rack->rack_deferred_inited = 1;
14198 rack->r_ctl.roundends = tp->snd_max;
14199 rack->r_ctl.rc_high_rwnd = tp->snd_wnd;
14200 rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
14203 static void
14204 rack_init_retransmit_value(struct tcp_rack *rack, int ctl)
14206 /* Retransmit bit controls.
14208 * The setting of these values controls one of
14209 * three behaviors you can have and dictates
14210 * how rack does retransmissions. Note this
14211 * applies in *any* mode, i.e. pacing on or off, DGP,
14212 * fixed rate pacing, or just bursting rack.
14214 * 1 - Use full sized retransmits i.e. limit
14215 * the size to whatever the pace_max_segments
14216 * size is.
14218 * 2 - Use pacer min granularity as a guide to
14219 * the size combined with the current calculated
14220 * goodput b/w measurement. So for example if
14221 * the goodput is measured at 20Mbps we would
14222 * calculate 8125 (pacer minimum 250usec in
14223 * that b/w) and then round it up to the next
14224 * MSS i.e. for 1448 mss 6 MSS or 8688 bytes.
14226 * 0 - The rack default of 1 MSS (anything not 0/1/2
14227 * falls here too if we are setting via rack_init()).
14230 if (ctl == 1) {
14231 rack->full_size_rxt = 1;
14232 rack->shape_rxt_to_pacing_min = 0;
14233 } else if (ctl == 2) {
14234 rack->full_size_rxt = 0;
14235 rack->shape_rxt_to_pacing_min = 1;
14236 } else {
14237 rack->full_size_rxt = 0;
14238 rack->shape_rxt_to_pacing_min = 0;
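/*
 * Illustrative sketch (hypothetical helper, not used by the stack) of
 * the mode-2 sizing step described above: round a byte target up to a
 * whole number of MSS's.  With the comment's example numbers, an
 * 8125-byte target and a 1448-byte MSS give 6 * 1448 = 8688 bytes.
 */
static inline uint32_t
rack_sketch_roundup_to_mss(uint32_t bytes, uint32_t mss)
{
	if (mss == 0)
		return (bytes);
	return (((bytes + mss - 1) / mss) * mss);
}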
14242 static void
14243 rack_log_chg_info(struct tcpcb *tp, struct tcp_rack *rack, uint8_t mod,
14244 uint32_t flex1,
14245 uint32_t flex2,
14246 uint32_t flex3)
14248 if (tcp_bblogging_on(rack->rc_tp)) {
14249 union tcp_log_stackspecific log;
14250 struct timeval tv;
14252 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
14253 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
14254 log.u_bbr.flex8 = mod;
14255 log.u_bbr.flex1 = flex1;
14256 log.u_bbr.flex2 = flex2;
14257 log.u_bbr.flex3 = flex3;
14258 tcp_log_event(tp, NULL, NULL, NULL, TCP_CHG_QUERY, 0,
14259 0, &log, false, NULL, __func__, __LINE__, &tv);
14263 static int
14264 rack_chg_query(struct tcpcb *tp, struct tcp_query_resp *reqr)
14266 struct tcp_rack *rack;
14267 struct rack_sendmap *rsm;
14268 int i;
14271 rack = (struct tcp_rack *)tp->t_fb_ptr;
14272 switch (reqr->req) {
14273 case TCP_QUERY_SENDMAP:
14274 if ((reqr->req_param == tp->snd_max) ||
14275 (tp->snd_max == tp->snd_una)){
14276 /* Unlikely */
14277 return (0);
14279 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param);
14280 if (rsm == NULL) {
14281 /* Can't find that seq -- unlikely */
14282 return (0);
14284 reqr->sendmap_start = rsm->r_start;
14285 reqr->sendmap_end = rsm->r_end;
14286 reqr->sendmap_send_cnt = rsm->r_rtr_cnt;
14287 reqr->sendmap_fas = rsm->r_fas;
14288 if (reqr->sendmap_send_cnt > SNDMAP_NRTX)
14289 reqr->sendmap_send_cnt = SNDMAP_NRTX;
14290 for (i = 0; i < reqr->sendmap_send_cnt; i++)
14291 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i];
14292 reqr->sendmap_ack_arrival = rsm->r_ack_arrival;
14293 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK;
14294 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes;
14295 reqr->sendmap_dupacks = rsm->r_dupack;
14296 rack_log_chg_info(tp, rack, 1,
14297 rsm->r_start,
14298 rsm->r_end,
14299 rsm->r_flags);
14300 return(1);
14301 break;
14302 case TCP_QUERY_TIMERS_UP:
14303 if (rack->r_ctl.rc_hpts_flags == 0) {
14304 /* no timers up */
14305 return (0);
14307 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags;
14308 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
14309 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to;
14311 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
14312 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp;
14314 rack_log_chg_info(tp, rack, 2,
14315 rack->r_ctl.rc_hpts_flags,
14316 rack->r_ctl.rc_last_output_to,
14317 rack->r_ctl.rc_timer_exp);
14318 return (1);
14319 break;
14320 case TCP_QUERY_RACK_TIMES:
14321 /* Reordering items */
14322 reqr->rack_num_dsacks = rack->r_ctl.num_dsack;
14323 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts;
14324 /* Timestamps and timers */
14325 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time;
14326 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt;
14327 reqr->rack_rtt = rack->rc_rack_rtt;
14328 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time;
14329 reqr->rack_srtt_measured = rack->rc_srtt_measure_made;
14330 /* PRR data */
14331 reqr->rack_sacked = rack->r_ctl.rc_sacked;
14332 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt;
14333 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered;
14334 reqr->rack_prr_recovery_fs = rack->r_ctl.rc_prr_recovery_fs;
14335 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt;
14336 reqr->rack_prr_out = rack->r_ctl.rc_prr_out;
14337 /* TLP and persists info */
14338 reqr->rack_tlp_out = rack->rc_tlp_in_progress;
14339 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out;
14340 if (rack->rc_in_persist) {
14341 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time;
14342 reqr->rack_in_persist = 1;
14343 } else {
14344 reqr->rack_time_went_idle = 0;
14345 reqr->rack_in_persist = 0;
14347 if (rack->r_wanted_output)
14348 reqr->rack_wanted_output = 1;
14349 else
14350 reqr->rack_wanted_output = 0;
14351 return (1);
14352 break;
14353 default:
14354 return (-EINVAL);
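/*
 * Illustrative sketch (not a definitive interface contract) of how a
 * consumer walks the old stack's send map with TCP_QUERY_SENDMAP; the
 * same pattern appears in rack_init_outstanding() below during a
 * stack switch.
 */
static inline void
rack_sketch_walk_old_sendmap(struct tcpcb *tp)
{
	struct tcp_query_resp qr;
	tcp_seq at;

	if (tp->t_fb->tfb_chg_query == NULL)
		return;
	at = tp->snd_una;
	while (at != tp->snd_max) {
		memset(&qr, 0, sizeof(qr));
		qr.req = TCP_QUERY_SENDMAP;
		qr.req_param = at;	/* starting sequence for this block */
		if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0)
			break;		/* nothing more to report */
		/* qr now describes the block [sendmap_start, sendmap_end) */
		at = qr.sendmap_end;	/* advance to the next block */
	}
}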
14358 static void
14359 rack_switch_failed(struct tcpcb *tp)
14362 * This method gets called if a stack switch was
14363 * attempted and it failed. We are left in place,
14364 * but our hpts timers were stopped and we
14365 * need to validate time units and t_flags2.
14367 struct tcp_rack *rack;
14368 struct timeval tv;
14369 uint32_t cts;
14370 uint32_t toval;
14371 struct hpts_diag diag;
14373 rack = (struct tcp_rack *)tp->t_fb_ptr;
14374 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC);
14375 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
14376 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
14377 else
14378 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
14379 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
14380 tp->t_flags2 |= TF2_MBUF_ACKCMP;
14381 if (tp->t_in_hpts > IHPTS_NONE) {
14382 /* Strange */
14383 return;
14385 cts = tcp_get_usecs(&tv);
14386 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
14387 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) {
14388 toval = rack->r_ctl.rc_last_output_to - cts;
14389 } else {
14390 /* one slot please */
14391 toval = HPTS_TICKS_PER_SLOT;
14393 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
14394 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
14395 toval = rack->r_ctl.rc_timer_exp - cts;
14396 } else {
14397 /* one slot please */
14398 toval = HPTS_TICKS_PER_SLOT;
14400 } else
14401 toval = HPTS_TICKS_PER_SLOT;
14402 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval),
14403 __LINE__, &diag);
14404 rack_log_hpts_diag(rack, cts, &diag, &tv);
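/*
 * Illustrative sketch (hypothetical helper, not used by the stack) of
 * the deadline handling above when re-inserting into hpts: wait until
 * the recorded expiration if it is still in the future, otherwise ask
 * for a single slot.
 */
static inline uint32_t
rack_sketch_usecs_until(uint32_t deadline, uint32_t now)
{
	if (TSTMP_GT(deadline, now))
		return (deadline - now);
	return (HPTS_TICKS_PER_SLOT);	/* already due: one slot please */
}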
14407 static int
14408 rack_init_outstanding(struct tcpcb *tp, struct tcp_rack *rack, uint32_t us_cts, void *ptr)
14410 struct rack_sendmap *rsm, *ersm;
14411 int insret __diagused;
14413 * When initializing the outstanding send map, we must be quite careful
14414 * not to refer to tp->t_fb_ptr. This has the old rack
14415 * pointer in it, not the "new" one (when we are doing
14416 * a stack switch).
14420 if (tp->t_fb->tfb_chg_query == NULL) {
14421 /* Create a send map for the current outstanding data */
14423 rsm = rack_alloc(rack);
14424 if (rsm == NULL) {
14425 uma_zfree(rack_pcb_zone, ptr);
14426 return (ENOMEM);
14428 rsm->r_no_rtt_allowed = 1;
14429 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
14430 rsm->r_rtr_cnt = 1;
14431 rsm->r_rtr_bytes = 0;
14432 if (tp->t_flags & TF_SENTFIN)
14433 rsm->r_flags |= RACK_HAS_FIN;
14434 rsm->r_end = tp->snd_max;
14435 if (tp->snd_una == tp->iss) {
14436 /* The data space is one beyond snd_una */
14437 rsm->r_flags |= RACK_HAS_SYN;
14438 rsm->r_start = tp->iss;
14439 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una);
14440 } else
14441 rsm->r_start = tp->snd_una;
14442 rsm->r_dupack = 0;
14443 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) {
14444 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff);
14445 if (rsm->m) {
14446 rsm->orig_m_len = rsm->m->m_len;
14447 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
14448 } else {
14449 rsm->orig_m_len = 0;
14450 rsm->orig_t_space = 0;
14452 } else {
14454 * This can happen if we have a stand-alone FIN or
14455 * SYN.
14457 rsm->m = NULL;
14458 rsm->orig_m_len = 0;
14459 rsm->orig_t_space = 0;
14460 rsm->soff = 0;
14462 #ifdef INVARIANTS
14463 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) {
14464 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p",
14465 insret, rack, rsm);
14467 #else
14468 (void)tqhash_insert(rack->r_ctl.tqh, rsm);
14469 #endif
14470 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
14471 rsm->r_in_tmap = 1;
14472 } else {
14473 /* We have a query mechanism, let's use it */
14474 struct tcp_query_resp qr;
14475 int i;
14476 tcp_seq at;
14478 at = tp->snd_una;
14479 while (at != tp->snd_max) {
14480 memset(&qr, 0, sizeof(qr));
14481 qr.req = TCP_QUERY_SENDMAP;
14482 qr.req_param = at;
14483 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0)
14484 break;
14485 /* Move forward */
14486 at = qr.sendmap_end;
14487 /* Now let's build the entry for this one */
14488 rsm = rack_alloc(rack);
14489 if (rsm == NULL) {
14490 uma_zfree(rack_pcb_zone, ptr);
14491 return (ENOMEM);
14493 memset(rsm, 0, sizeof(struct rack_sendmap));
14494 /* Now configure the rsm and insert it */
14495 rsm->r_dupack = qr.sendmap_dupacks;
14496 rsm->r_start = qr.sendmap_start;
14497 rsm->r_end = qr.sendmap_end;
14498 if (qr.sendmap_fas)
14499 rsm->r_fas = qr.sendmap_end;
14500 else
14501 rsm->r_fas = rsm->r_start - tp->snd_una;
14503 * We have carefully aligned the bits
14504 * so that all we have to do is copy over
14505 * the bits with the mask.
14507 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK;
14508 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes;
14509 rsm->r_rtr_cnt = qr.sendmap_send_cnt;
14510 rsm->r_ack_arrival = qr.sendmap_ack_arrival;
14511 for (i = 0; i < rsm->r_rtr_cnt; i++)
14512 rsm->r_tim_lastsent[i] = qr.sendmap_time[i];
14513 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
14514 (rsm->r_start - tp->snd_una), &rsm->soff);
14515 if (rsm->m) {
14516 rsm->orig_m_len = rsm->m->m_len;
14517 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
14518 } else {
14519 rsm->orig_m_len = 0;
14520 rsm->orig_t_space = 0;
14522 #ifdef INVARIANTS
14523 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) {
14524 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p",
14525 insret, rack, rsm);
14527 #else
14528 (void)tqhash_insert(rack->r_ctl.tqh, rsm);
14529 #endif
14530 if ((rsm->r_flags & RACK_ACKED) == 0) {
14531 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) {
14532 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] >
14533 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) {
14535 * If the existing ersm was sent at
14536 * a later time than the new one, then
14537 * the new one should appear ahead of this
14538 * ersm.
14540 rsm->r_in_tmap = 1;
14541 TAILQ_INSERT_BEFORE(ersm, rsm, r_tnext);
14542 break;
14545 if (rsm->r_in_tmap == 0) {
14547 * Not found so shove it on the tail.
14549 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
14550 rsm->r_in_tmap = 1;
14552 } else {
14553 if ((rack->r_ctl.rc_sacklast == NULL) ||
14554 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) {
14555 rack->r_ctl.rc_sacklast = rsm;
14558 rack_log_chg_info(tp, rack, 3,
14559 rsm->r_start,
14560 rsm->r_end,
14561 rsm->r_flags);
14564 return (0);
14568 static int32_t
14569 rack_init(struct tcpcb *tp, void **ptr)
14571 struct inpcb *inp = tptoinpcb(tp);
14572 struct tcp_rack *rack = NULL;
14573 uint32_t iwin, snt, us_cts;
14574 size_t sz;
14575 int err, no_query;
14577 tcp_hpts_init(tp);
14580 * First, are we the initial stack or are we a switched stack?
14581 * If we are initializing via tcp_newtcppcb, the ptr passed
14582 * will be &tp->t_fb_ptr. If it is a stack switch that
14583 * has a previous stack we can query, it will be a local
14584 * var that will in the end be set into t_fb_ptr.
14586 if (ptr == &tp->t_fb_ptr)
14587 no_query = 1;
14588 else
14589 no_query = 0;
14590 *ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT);
14591 if (*ptr == NULL) {
14593 * We need to allocate memory but can't. We are holding the INP and
14594 * INP_INFO locks and they are recursive (this happens during setup), so a
14595 * scheme to drop the locks fails :(
14598 return(ENOMEM);
14600 memset(*ptr, 0, sizeof(struct tcp_rack));
14601 rack = (struct tcp_rack *)*ptr;
14602 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT);
14603 if (rack->r_ctl.tqh == NULL) {
14604 uma_zfree(rack_pcb_zone, rack);
14605 return(ENOMEM);
14607 tqhash_init(rack->r_ctl.tqh);
14608 TAILQ_INIT(&rack->r_ctl.rc_free);
14609 TAILQ_INIT(&rack->r_ctl.rc_tmap);
14610 rack->rc_tp = tp;
14611 rack->rc_inp = inp;
14612 /* Set the flag */
14613 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0;
14614 /* Probably not needed but let's be sure */
14615 rack_clear_rate_sample(rack);
14617 * Save off the default values, socket options will poke
14618 * at these if pacing is not on or we have not yet
14619 * reached where pacing is on (gp_ready/fixed enabled).
14620 * When they get set into the CC module (when gp_ready
14621 * is enabled or we enable fixed) then we will set these
14622 * values into the CC and place in here the old values
14623 * so we have a restoral. Then we will set the flag
14624 * rc_pacing_cc_set. That way whenever we turn off pacing
14625 * or switch off this stack, we will know to go restore
14626 * the saved values.
14628 * We specifically put into the beta the ecn value for pacing.
14630 rack->rc_new_rnd_needed = 1;
14631 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit;
14632 /* We want ABE-like behavior as well */
14634 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED;
14635 rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
14636 rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
14637 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
14638 if (rack_fill_cw_state)
14639 rack->rc_pace_to_cwnd = 1;
14640 if (rack_pacing_min_seg)
14641 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg;
14642 if (use_rack_rr)
14643 rack->use_rack_rr = 1;
14644 if (rack_dnd_default) {
14645 rack->rc_pace_dnd = 1;
14647 if (V_tcp_delack_enabled)
14648 tp->t_delayed_ack = 1;
14649 else
14650 tp->t_delayed_ack = 0;
14651 #ifdef TCP_ACCOUNTING
14652 if (rack_tcp_accounting) {
14653 tp->t_flags2 |= TF2_TCP_ACCOUNTING;
14655 #endif
14656 rack->r_ctl.pcm_i.cnt_alloc = RACK_DEFAULT_PCM_ARRAY;
14657 sz = (sizeof(struct rack_pcm_stats) * rack->r_ctl.pcm_i.cnt_alloc);
14658 rack->r_ctl.pcm_s = malloc(sz, M_TCPPCM, M_NOWAIT);
14659 if (rack->r_ctl.pcm_s == NULL) {
14660 rack->r_ctl.pcm_i.cnt_alloc = 0;
14662 #ifdef NETFLIX_STATS
14663 rack->r_ctl.side_chan_dis_mask = tcp_sidechannel_disable_mask;
14664 #endif
14665 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss;
14666 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca;
14667 if (rack_enable_shared_cwnd)
14668 rack->rack_enable_scwnd = 1;
14669 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor;
14670 rack->rc_user_set_max_segs = rack_hptsi_segments;
14671 rack->r_ctl.max_reduction = rack_max_reduce;
14672 rack->rc_force_max_seg = 0;
14673 TAILQ_INIT(&rack->r_ctl.opt_list);
14674 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn;
14675 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn;
14676 if (rack_hibeta_setting) {
14677 rack->rack_hibeta = 1;
14678 if ((rack_hibeta_setting >= 50) &&
14679 (rack_hibeta_setting <= 100)) {
14680 rack->r_ctl.rc_saved_beta.beta = rack_hibeta_setting;
14681 rack->r_ctl.saved_hibeta = rack_hibeta_setting;
14683 } else {
14684 rack->r_ctl.saved_hibeta = 50;
14687 * We initialize to all ones so we never match 0,
14688 * just in case the client sends in 0; it hopefully
14689 * will never have all 1's in ms :-)
14691 rack->r_ctl.last_tm_mark = 0xffffffffffffffff;
14692 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh;
14693 rack->r_ctl.rc_pkt_delay = rack_pkt_delay;
14694 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp;
14695 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff;
14696 rack->r_ctl.rc_highest_us_rtt = 0;
14697 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap;
14698 rack->pcm_enabled = rack_pcm_is_enabled;
14699 if (rack_fillcw_bw_cap)
14700 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap;
14701 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop);
14702 if (rack_use_cmp_acks)
14703 rack->r_use_cmp_ack = 1;
14704 if (rack_disable_prr)
14705 rack->rack_no_prr = 1;
14706 if (rack_gp_no_rec_chg)
14707 rack->rc_gp_no_rec_chg = 1;
14708 if (rack_pace_every_seg && tcp_can_enable_pacing()) {
14709 rack->r_ctl.pacing_method |= RACK_REG_PACING;
14710 rack->rc_always_pace = 1;
14711 if (rack->rack_hibeta)
14712 rack_set_cc_pacing(rack);
14713 } else
14714 rack->rc_always_pace = 0;
14715 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack)
14716 rack->r_mbuf_queue = 1;
14717 else
14718 rack->r_mbuf_queue = 0;
14719 rack_set_pace_segments(tp, rack, __LINE__, NULL);
14720 if (rack_limits_scwnd)
14721 rack->r_limit_scw = 1;
14722 else
14723 rack->r_limit_scw = 0;
14724 rack_init_retransmit_value(rack, rack_rxt_controls);
14725 rack->rc_labc = V_tcp_abc_l_var;
14726 if (rack_honors_hpts_min_to)
14727 rack->r_use_hpts_min = 1;
14728 if (tp->snd_una != 0) {
14729 rack->rc_sendvars_notset = 0;
14731 * Make sure any TCP timers are not running.
14733 tcp_timer_stop(tp);
14734 } else {
14736 * Server side, we are called from the
14737 * syn-cache. This means none of the
14738 * snd_una/max are set yet so we have
14739 * to defer this until the first send.
14741 rack->rc_sendvars_notset = 1;
14744 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method;
14745 rack->rack_tlp_threshold_use = rack_tlp_threshold_use;
14746 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr;
14747 rack->r_ctl.rc_min_to = rack_min_to;
14748 microuptime(&rack->r_ctl.act_rcv_time);
14749 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
14750 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss;
14751 if (rack_hw_up_only)
14752 rack->r_up_only = 1;
14753 if (rack_do_dyn_mul) {
14754 /* When dynamic adjustment is on, CA needs to start at 100% */
14755 rack->rc_gp_dyn_mul = 1;
14756 if (rack_do_dyn_mul >= 100)
14757 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
14758 } else
14759 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
14760 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec;
14761 if (rack_timely_off) {
14762 rack->rc_skip_timely = 1;
14764 if (rack->rc_skip_timely) {
14765 rack->r_ctl.rack_per_of_gp_rec = 90;
14766 rack->r_ctl.rack_per_of_gp_ca = 100;
14767 rack->r_ctl.rack_per_of_gp_ss = 250;
14769 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
14770 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
14771 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
14773 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN,
14774 rack_probertt_filter_life);
14775 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
14776 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
14777 rack->r_ctl.rc_time_of_last_probertt = us_cts;
14778 rack->r_ctl.rc_went_idle_time = us_cts;
14779 rack->r_ctl.rc_time_probertt_starts = 0;
14781 rack->r_ctl.gp_rnd_thresh = rack_rnd_cnt_req & 0xff;
14782 if (rack_rnd_cnt_req & 0x10000)
14783 rack->r_ctl.gate_to_fs = 1;
14784 rack->r_ctl.gp_gain_req = rack_gp_gain_req;
14785 if ((rack_rnd_cnt_req & 0x100) > 0) {
14788 if (rack_dsack_std_based & 0x1) {
14789 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */
14790 rack->rc_rack_tmr_std_based = 1;
14792 if (rack_dsack_std_based & 0x2) {
14793 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */
14794 rack->rc_rack_use_dsack = 1;
14796 /* We require at least one measurement, even if the sysctl is 0 */
14797 if (rack_req_measurements)
14798 rack->r_ctl.req_measurements = rack_req_measurements;
14799 else
14800 rack->r_ctl.req_measurements = 1;
14801 if (rack_enable_hw_pacing)
14802 rack->rack_hdw_pace_ena = 1;
14803 if (rack_hw_rate_caps)
14804 rack->r_rack_hw_rate_caps = 1;
14805 if (rack_non_rxt_use_cr)
14806 rack->rack_rec_nonrxt_use_cr = 1;
14807 /* Let's set up the fsb block */
14808 err = rack_init_fsb(tp, rack);
14809 if (err) {
14810 uma_zfree(rack_pcb_zone, *ptr);
14811 *ptr = NULL;
14812 return (err);
14814 if (rack_do_hystart) {
14815 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
14816 if (rack_do_hystart > 1)
14817 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
14818 if (rack_do_hystart > 2)
14819 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
14821 /* Log what we will do with queries */
14822 rack_log_chg_info(tp, rack, 7,
14823 no_query, 0, 0);
14824 if (rack_def_profile)
14825 rack_set_profile(rack, rack_def_profile);
14826 /* Cancel the GP measurement in progress */
14827 tp->t_flags &= ~TF_GPUTINPROG;
14828 if ((tp->t_state != TCPS_CLOSED) &&
14829 (tp->t_state != TCPS_TIME_WAIT)) {
14831 * We are already open, we may
14832 * need to adjust a few things.
14834 if (SEQ_GT(tp->snd_max, tp->iss))
14835 snt = tp->snd_max - tp->iss;
14836 else
14837 snt = 0;
14838 iwin = rc_init_window(rack);
14839 if ((snt < iwin) &&
14840 (no_query == 1)) {
14841 /* We are not past the initial window
14842 * on the first init (i.e. a stack switch
14843 * has not yet occurred) so we need to make
14844 * sure cwnd and ssthresh are correct.
14846 if (tp->snd_cwnd < iwin)
14847 tp->snd_cwnd = iwin;
14849 * If we are within the initial window
14850 * we want ssthresh to be unlimited. Setting
14851 * it to the rwnd (which the default stack does
14852 * and older racks) is not really a good idea
14853 * since we want to be in SS and grow both the
14854 * cwnd and the rwnd (via dynamic rwnd growth). If
14855 * we set it to the rwnd then as the peer grows its
14856 * rwnd we will be stuck in CA and never hit SS.
14858 * It's far better to raise it up high (this takes the
14859 * risk that there has been a loss already; probably
14860 * we should have an indicator of loss in all stacks
14861 * but we don't), but considering the normal use this
14862 * is a risk worth taking. The consequences of not
14863 * hitting SS are far worse than going one more time
14864 * into it early on (before we have sent even an IW).
14865 * It is highly unlikely that we will have had a loss
14866 * before getting the IW out.
14868 tp->snd_ssthresh = 0xffffffff;
14871 * Any init based on sequence numbers
14872 * should be done in the deferred init path
14873 * since we can be CLOSED and not have them
14874 * inited when rack_init() is called. We
14875 * are not closed so let's call it.
14877 rack_deferred_init(tp, rack);
14879 if ((tp->t_state != TCPS_CLOSED) &&
14880 (tp->t_state != TCPS_TIME_WAIT) &&
14881 (no_query == 0) &&
14882 (tp->snd_una != tp->snd_max)) {
14883 err = rack_init_outstanding(tp, rack, us_cts, *ptr);
14884 if (err) {
14885 *ptr = NULL;
14886 return(err);
14889 rack_stop_all_timers(tp, rack);
14890 /* Setup all the t_flags2 */
14891 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
14892 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
14893 else
14894 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
14895 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
14896 tp->t_flags2 |= TF2_MBUF_ACKCMP;
14898 * Timers in Rack are kept in microseconds so let's
14899 * convert any initial incoming variables
14900 * from ticks into usecs. Note that we
14901 * also change the values of t_srtt and t_rttvar, if
14902 * they are non-zero. They are kept with a 5
14903 * bit decimal so we have to carefully convert
14904 * these to get the full precision.
14906 rack_convert_rtts(tp);
14907 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20);
14908 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) {
14909 /* We do not start any timers on DROPPED connections */
14910 if (tp->t_fb->tfb_chg_query == NULL) {
14911 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
14912 } else {
14913 struct tcp_query_resp qr;
14914 int ret;
14916 memset(&qr, 0, sizeof(qr));
14918 /* Get the misc time stamps and such for rack */
14919 qr.req = TCP_QUERY_RACK_TIMES;
14920 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr);
14921 if (ret == 1) {
14922 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts;
14923 rack->r_ctl.num_dsack = qr.rack_num_dsacks;
14924 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time;
14925 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt;
14926 rack->rc_rack_rtt = qr.rack_rtt;
14927 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time;
14928 rack->r_ctl.rc_sacked = qr.rack_sacked;
14929 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt;
14930 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered;
14931 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs;
14932 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt;
14933 rack->r_ctl.rc_prr_out = qr.rack_prr_out;
14934 if (qr.rack_tlp_out) {
14935 rack->rc_tlp_in_progress = 1;
14936 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out;
14937 } else {
14938 rack->rc_tlp_in_progress = 0;
14939 rack->r_ctl.rc_tlp_cnt_out = 0;
14941 if (qr.rack_srtt_measured)
14942 rack->rc_srtt_measure_made = 1;
14943 if (qr.rack_in_persist == 1) {
14944 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle;
14945 #ifdef NETFLIX_SHARED_CWND
14946 if (rack->r_ctl.rc_scw) {
14947 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
14948 rack->rack_scwnd_is_idle = 1;
14950 #endif
14951 rack->r_ctl.persist_lost_ends = 0;
14952 rack->probe_not_answered = 0;
14953 rack->forced_ack = 0;
14954 tp->t_rxtshift = 0;
14955 rack->rc_in_persist = 1;
14956 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
14957 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
14959 if (qr.rack_wanted_output)
14960 rack->r_wanted_output = 1;
14961 rack_log_chg_info(tp, rack, 6,
14962 qr.rack_min_rtt,
14963 qr.rack_rtt,
14964 qr.rack_reorder_ts);
14966 /* Get the old stack timers */
14967 qr.req_param = 0;
14968 qr.req = TCP_QUERY_TIMERS_UP;
14969 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr);
14970 if (ret) {
14972 * A non-zero return means we have a timer(s)
14973 * to start. Zero means no timer (no keepalive,
14974 * I suppose).
14976 uint32_t tov = 0;
14978 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags;
14979 if (qr.timer_hpts_flags & PACE_PKT_OUTPUT) {
14980 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to;
14981 if (TSTMP_GT(qr.timer_pacing_to, us_cts))
14982 tov = qr.timer_pacing_to - us_cts;
14983 else
14984 tov = HPTS_TICKS_PER_SLOT;
14986 if (qr.timer_hpts_flags & PACE_TMR_MASK) {
14987 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp;
14988 if (tov == 0) {
14989 if (TSTMP_GT(qr.timer_timer_exp, us_cts))
14990 tov = qr.timer_timer_exp - us_cts;
14991 else
14992 tov = HPTS_TICKS_PER_SLOT;
14995 rack_log_chg_info(tp, rack, 4,
14996 rack->r_ctl.rc_hpts_flags,
14997 rack->r_ctl.rc_last_output_to,
14998 rack->r_ctl.rc_timer_exp);
14999 if (tov) {
15000 struct hpts_diag diag;
15002 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(tov),
15003 __LINE__, &diag);
15004 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time);
15008 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur,
15009 __LINE__, RACK_RTTS_INIT);
15011 return (0);
15014 static int
15015 rack_handoff_ok(struct tcpcb *tp)
15017 if ((tp->t_state == TCPS_CLOSED) ||
15018 (tp->t_state == TCPS_LISTEN)) {
15019 /* Sure no problem though it may not stick */
15020 return (0);
15022 if ((tp->t_state == TCPS_SYN_SENT) ||
15023 (tp->t_state == TCPS_SYN_RECEIVED)) {
15025 * We really don't know if you support sack,
15026 * you have to get to ESTAB or beyond to tell.
15028 return (EAGAIN);
15030 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) {
15032 * Rack will only send a FIN after all data is acknowledged.
15033 * So in this case we have more data outstanding. We can't
15034 * switch stacks until either all data and only the FIN
15035 * is left (in which case rack_init() now knows how
15036 * to deal with that) <or> all is acknowledged and we
15037 * are only left with incoming data, though why you
15038 * would want to switch to rack after all data is acknowledged
15039 * I have no idea (rrs)!
15041 return (EAGAIN);
15043 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){
15044 return (0);
15047 * If we reach here we don't do SACK on this connection so we can
15048 * never do rack.
15050 return (EINVAL);
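/*
 * Summary of rack_handoff_ok() as implemented above: 0 means the handoff
 * to rack may proceed (CLOSED/LISTEN, or SACK is permitted or not
 * required), EAGAIN means try again later (still in SYN_SENT/SYN_RECEIVED,
 * or a FIN is out with more than just the FIN unacknowledged), and EINVAL
 * means the connection can never run rack because SACK was not negotiated.
 */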
15053 static void
15054 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
15057 if (tp->t_fb_ptr) {
15058 uint32_t cnt_free = 0;
15059 struct tcp_rack *rack;
15060 struct rack_sendmap *rsm;
15062 tcp_handle_orphaned_packets(tp);
15063 tp->t_flags &= ~TF_FORCEDATA;
15064 rack = (struct tcp_rack *)tp->t_fb_ptr;
15065 rack_log_pacing_delay_calc(rack,
15069 rack_get_gp_est(rack), /* delRate */
15070 rack_get_lt_bw(rack), /* rttProp */
15071 20, __LINE__, NULL, 0);
15072 #ifdef NETFLIX_SHARED_CWND
15073 if (rack->r_ctl.rc_scw) {
15074 uint32_t limit;
15076 if (rack->r_limit_scw)
15077 limit = max(1, rack->r_ctl.rc_lowest_us_rtt);
15078 else
15079 limit = 0;
15080 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw,
15081 rack->r_ctl.rc_scw_index,
15082 limit);
15083 rack->r_ctl.rc_scw = NULL;
15085 #endif
15086 if (rack->r_ctl.fsb.tcp_ip_hdr) {
15087 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB);
15088 rack->r_ctl.fsb.tcp_ip_hdr = NULL;
15089 rack->r_ctl.fsb.th = NULL;
15091 if (rack->rc_always_pace == 1) {
15092 rack_remove_pacing(rack);
15094 /* Clean up any options if they were not applied */
15095 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) {
15096 struct deferred_opt_list *dol;
15098 dol = TAILQ_FIRST(&rack->r_ctl.opt_list);
15099 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
15100 free(dol, M_TCPDO);
15102 /* rack does not use force data but other stacks may clear it */
15103 if (rack->r_ctl.crte != NULL) {
15104 tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
15105 rack->rack_hdrw_pacing = 0;
15106 rack->r_ctl.crte = NULL;
15108 #ifdef TCP_BLACKBOX
15109 tcp_log_flowend(tp);
15110 #endif
15112 * Let's take a different approach to purging: just
15113 * get each one and free it like a cum-ack would and
15114 * not use a foreach loop.
15116 rsm = tqhash_min(rack->r_ctl.tqh);
15117 while (rsm) {
15118 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK);
15119 rack->r_ctl.rc_num_maps_alloced--;
15120 uma_zfree(rack_zone, rsm);
15121 rsm = tqhash_min(rack->r_ctl.tqh);
15123 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
15124 while (rsm) {
15125 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
15126 rack->r_ctl.rc_num_maps_alloced--;
15127 rack->rc_free_cnt--;
15128 cnt_free++;
15129 uma_zfree(rack_zone, rsm);
15130 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
15132 if (rack->r_ctl.pcm_s != NULL) {
15133 free(rack->r_ctl.pcm_s, M_TCPPCM);
15134 rack->r_ctl.pcm_s = NULL;
15135 rack->r_ctl.pcm_i.cnt_alloc = 0;
15136 rack->r_ctl.pcm_i.cnt = 0;
15138 if ((rack->r_ctl.rc_num_maps_alloced > 0) &&
15139 (tcp_bblogging_on(tp))) {
15140 union tcp_log_stackspecific log;
15141 struct timeval tv;
15143 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
15144 log.u_bbr.flex8 = 10;
15145 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced;
15146 log.u_bbr.flex2 = rack->rc_free_cnt;
15147 log.u_bbr.flex3 = cnt_free;
15148 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
15149 rsm = tqhash_min(rack->r_ctl.tqh);
15150 log.u_bbr.delRate = (uintptr_t)rsm;
15151 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
15152 log.u_bbr.cur_del_rate = (uintptr_t)rsm;
15153 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
15154 log.u_bbr.pkt_epoch = __LINE__;
15155 (void)tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
15156 0, &log, false, NULL, NULL, 0, &tv);
15158 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0),
15159 ("rack:%p num_aloc:%u after freeing all?",
15160 rack,
15161 rack->r_ctl.rc_num_maps_alloced));
15162 rack->rc_free_cnt = 0;
15163 free(rack->r_ctl.tqh, M_TCPFSB);
15164 rack->r_ctl.tqh = NULL;
15165 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
15166 tp->t_fb_ptr = NULL;
15168 /* Make sure snd_nxt is correctly set */
15169 tp->snd_nxt = tp->snd_max;
15172 static void
15173 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack)
15175 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) {
15176 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0;
15178 switch (tp->t_state) {
15179 case TCPS_SYN_SENT:
15180 rack->r_state = TCPS_SYN_SENT;
15181 rack->r_substate = rack_do_syn_sent;
15182 break;
15183 case TCPS_SYN_RECEIVED:
15184 rack->r_state = TCPS_SYN_RECEIVED;
15185 rack->r_substate = rack_do_syn_recv;
15186 break;
15187 case TCPS_ESTABLISHED:
15188 rack_set_pace_segments(tp, rack, __LINE__, NULL);
15189 rack->r_state = TCPS_ESTABLISHED;
15190 rack->r_substate = rack_do_established;
15191 break;
15192 case TCPS_CLOSE_WAIT:
15193 rack->r_state = TCPS_CLOSE_WAIT;
15194 rack->r_substate = rack_do_close_wait;
15195 break;
15196 case TCPS_FIN_WAIT_1:
15197 rack_set_pace_segments(tp, rack, __LINE__, NULL);
15198 rack->r_state = TCPS_FIN_WAIT_1;
15199 rack->r_substate = rack_do_fin_wait_1;
15200 break;
15201 case TCPS_CLOSING:
15202 rack_set_pace_segments(tp, rack, __LINE__, NULL);
15203 rack->r_state = TCPS_CLOSING;
15204 rack->r_substate = rack_do_closing;
15205 break;
15206 case TCPS_LAST_ACK:
15207 rack_set_pace_segments(tp, rack, __LINE__, NULL);
15208 rack->r_state = TCPS_LAST_ACK;
15209 rack->r_substate = rack_do_lastack;
15210 break;
15211 case TCPS_FIN_WAIT_2:
15212 rack->r_state = TCPS_FIN_WAIT_2;
15213 rack->r_substate = rack_do_fin_wait_2;
15214 break;
15215 case TCPS_LISTEN:
15216 case TCPS_CLOSED:
15217 case TCPS_TIME_WAIT:
15218 default:
15219 break;
15221 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
15222 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
15226 static void
15227 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
15230 * We received an ack, and then did not
15231 * call send or were bounced out because the
15232 * hpts was running. Now a timer is up as well, is
15233 * it the right timer?
15235 struct rack_sendmap *rsm;
15236 int tmr_up;
15238 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
15239 if (tcp_in_hpts(rack->rc_tp) == 0) {
15241 * Ok, we probably need some timer up, but no
15242 * matter what the mask says we are not in hpts. We
15243 * may have received an old ack and thus did nothing.
15245 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
15246 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
15247 return;
15249 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
15250 return;
15251 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
15252 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
15253 (tmr_up == PACE_TMR_RXT)) {
15254 /* Should be an RXT */
15255 return;
15257 if (rsm == NULL) {
15258 /* Nothing outstanding? */
15259 if (tp->t_flags & TF_DELACK) {
15260 if (tmr_up == PACE_TMR_DELACK)
15261 /* We are supposed to have delayed ack up and we do */
15262 return;
15263 } else if (((V_tcp_always_keepalive ||
15264 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
15265 (tp->t_state <= TCPS_CLOSING)) &&
15266 (tmr_up == PACE_TMR_KEEP) &&
15267 (tp->snd_max == tp->snd_una)) {
15268 /* We should have keep alive up and we do */
15269 return;
15272 if (SEQ_GT(tp->snd_max, tp->snd_una) &&
15273 ((tmr_up == PACE_TMR_TLP) ||
15274 (tmr_up == PACE_TMR_RACK) ||
15275 (tmr_up == PACE_TMR_RXT))) {
15277 * Either a Rack, TLP or RXT is fine if we
15278 * have outstanding data.
15280 return;
15281 } else if (tmr_up == PACE_TMR_DELACK) {
15283 * If the delayed ack was going to go off
15284 * before the rtx/tlp/rack timer was going to
15285 * expire, then that would be the timer in control.
15286 * Note we don't check the time here, trusting that the
15287 * code is correct.
15289 return;
15292 * Ok, the timer originally started is not what we want now.
15293 * We will force the hpts to be stopped, if it is running, and restart
15294 * with the slot set to what was in the saved slot.
15296 if (tcp_in_hpts(rack->rc_tp)) {
15297 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
15298 uint32_t us_cts;
15300 us_cts = tcp_get_usecs(NULL);
15301 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
15302 rack->r_early = 1;
15303 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
15305 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
15307 tcp_hpts_remove(rack->rc_tp);
15309 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
15310 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
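/*
 * rack_do_win_updates() below applies the classic snd_wl1/snd_wl2 test:
 * accept the advertised window when the segment is newer (seq > snd_wl1),
 * or equally new but acking more (ack > snd_wl2), or identical on both
 * counts while the window itself grew. It also handles persist-state
 * entry/exit based on the resulting send window.
 */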
15314 static void
15315 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts)
15317 if ((SEQ_LT(tp->snd_wl1, seq) ||
15318 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) ||
15319 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) {
15320 /* keep track of pure window updates */
15321 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd))
15322 KMOD_TCPSTAT_INC(tcps_rcvwinupd);
15323 tp->snd_wnd = tiwin;
15324 rack_validate_fo_sendwin_up(tp, rack);
15325 tp->snd_wl1 = seq;
15326 tp->snd_wl2 = ack;
15327 if (tp->snd_wnd > tp->max_sndwnd)
15328 tp->max_sndwnd = tp->snd_wnd;
15329 rack->r_wanted_output = 1;
15330 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) {
15331 tp->snd_wnd = tiwin;
15332 rack_validate_fo_sendwin_up(tp, rack);
15333 tp->snd_wl1 = seq;
15334 tp->snd_wl2 = ack;
15335 } else {
15336 /* Not a valid win update */
15337 return;
15339 if (tp->snd_wnd > tp->max_sndwnd)
15340 tp->max_sndwnd = tp->snd_wnd;
15341 /* Do we exit persists? */
15342 if ((rack->rc_in_persist != 0) &&
15343 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
15344 rack->r_ctl.rc_pace_min_segs))) {
15345 rack_exit_persist(tp, rack, cts);
15347 /* Do we enter persists? */
15348 if ((rack->rc_in_persist == 0) &&
15349 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
15350 TCPS_HAVEESTABLISHED(tp->t_state) &&
15351 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
15352 sbavail(&tptosocket(tp)->so_snd) &&
15353 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
15355 * Here the rwnd is less than
15356 * the pacing size, we are established,
15357 * nothing is outstanding, and there is
15358 * data to send. Enter persists.
15360 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack);
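/*
 * Illustrative numbers, hypothetical and not from the source: with
 * rc_high_rwnd = 65535 and rc_pace_min_segs = 2896, the persist
 * threshold used above is min(65535 / 2, 2896) = 2896 bytes, so a
 * peer window below 2896 with data waiting and nothing outstanding
 * pushes us into persist, and a window at or above it takes us back
 * out.
 */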
15364 static void
15365 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq)
15368 if (tcp_bblogging_on(rack->rc_tp)) {
15369 struct inpcb *inp = tptoinpcb(tp);
15370 union tcp_log_stackspecific log;
15371 struct timeval ltv;
15372 char tcp_hdr_buf[60];
15373 struct tcphdr *th;
15374 struct timespec ts;
15375 uint32_t orig_snd_una;
15376 uint8_t xx = 0;
15378 #ifdef TCP_REQUEST_TRK
15379 struct tcp_sendfile_track *tcp_req;
15381 if (SEQ_GT(ae->ack, tp->snd_una)) {
15382 tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1));
15383 } else {
15384 tcp_req = tcp_req_find_req_for_seq(tp, ae->ack);
15386 #endif
15387 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
15388 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
15389 if (rack->rack_no_prr == 0)
15390 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
15391 else
15392 log.u_bbr.flex1 = 0;
15393 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
15394 log.u_bbr.use_lt_bw <<= 1;
15395 log.u_bbr.use_lt_bw |= rack->r_might_revert;
15396 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
15397 log.u_bbr.bbr_state = rack->rc_free_cnt;
15398 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
15399 log.u_bbr.pkts_out = tp->t_maxseg;
15400 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
15401 log.u_bbr.flex7 = 1;
15402 log.u_bbr.lost = ae->flags;
15403 log.u_bbr.cwnd_gain = ackval;
15404 log.u_bbr.pacing_gain = 0x2;
15405 if (ae->flags & TSTMP_HDWR) {
15406 /* Record the hardware timestamp if present */
15407 log.u_bbr.flex3 = M_TSTMP;
15408 ts.tv_sec = ae->timestamp / 1000000000;
15409 ts.tv_nsec = ae->timestamp % 1000000000;
15410 ltv.tv_sec = ts.tv_sec;
15411 ltv.tv_usec = ts.tv_nsec / 1000;
15412 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
15413 } else if (ae->flags & TSTMP_LRO) {
15414 /* Record the LRO arrival timestamp */
15415 log.u_bbr.flex3 = M_TSTMP_LRO;
15416 ts.tv_sec = ae->timestamp / 1000000000;
15417 ts.tv_nsec = ae->timestamp % 1000000000;
15418 ltv.tv_sec = ts.tv_sec;
15419 ltv.tv_usec = ts.tv_nsec / 1000;
15420 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
15422 log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
15423 /* Log the rcv time */
15424 log.u_bbr.delRate = ae->timestamp;
15425 #ifdef TCP_REQUEST_TRK
15426 log.u_bbr.applimited = tp->t_tcpreq_closed;
15427 log.u_bbr.applimited <<= 8;
15428 log.u_bbr.applimited |= tp->t_tcpreq_open;
15429 log.u_bbr.applimited <<= 8;
15430 log.u_bbr.applimited |= tp->t_tcpreq_req;
15431 if (tcp_req) {
15432 /* Copy out any client req info */
15433 /* seconds */
15434 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC);
15435 /* useconds */
15436 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC);
15437 log.u_bbr.rttProp = tcp_req->timestamp;
15438 log.u_bbr.cur_del_rate = tcp_req->start;
15439 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) {
15440 log.u_bbr.flex8 |= 1;
15441 } else {
15442 log.u_bbr.flex8 |= 2;
15443 log.u_bbr.bw_inuse = tcp_req->end;
15445 log.u_bbr.flex6 = tcp_req->start_seq;
15446 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) {
15447 log.u_bbr.flex8 |= 4;
15448 log.u_bbr.epoch = tcp_req->end_seq;
15451 #endif
15452 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf));
15453 th = (struct tcphdr *)tcp_hdr_buf;
15454 th->th_seq = ae->seq;
15455 th->th_ack = ae->ack;
15456 th->th_win = ae->win;
15457 /* Now fill in the ports */
15458 th->th_sport = inp->inp_fport;
15459 th->th_dport = inp->inp_lport;
15460 tcp_set_flags(th, ae->flags);
15461 /* Now do we have a timestamp option? */
15462 if (ae->flags & HAS_TSTMP) {
15463 u_char *cp;
15464 uint32_t val;
15466 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2);
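/*
 * 20-byte base header plus TCPOLEN_TSTAMP_APPA (12) bytes of options
 * is 32 bytes, so the data offset field becomes 32 >> 2 = 8.
 */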
15467 cp = (u_char *)(th + 1);
15468 *cp = TCPOPT_NOP;
15469 cp++;
15470 *cp = TCPOPT_NOP;
15471 cp++;
15472 *cp = TCPOPT_TIMESTAMP;
15473 cp++;
15474 *cp = TCPOLEN_TIMESTAMP;
15475 cp++;
15476 val = htonl(ae->ts_value);
15477 bcopy((char *)&val,
15478 (char *)cp, sizeof(uint32_t));
15479 val = htonl(ae->ts_echo);
15480 bcopy((char *)&val,
15481 (char *)(cp + 4), sizeof(uint32_t));
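/*
 * The reconstructed option block is the standard appendix-A layout:
 * NOP(0x01) NOP(0x01) TIMESTAMP(0x08) len(0x0a) followed by the 4-byte
 * ts_value and 4-byte ts_echo written above, 12 bytes in all.
 */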
15482 } else
15483 th->th_off = (sizeof(struct tcphdr) >> 2);
15486 * For sane logging we need to play a little trick.
15487 * If the ack were fully processed we would have moved
15488 * snd_una to high_seq, but since compressed acks are
15489 * processed in two phases, at this point (logging) snd_una
15490 * won't be advanced. So we would see multiple acks showing
15491 * the advancement. We can prevent that by "pretending" that
15492 * snd_una was advanced and then un-advancing it so that the
15493 * logging code has the right value for tlb_snd_una.
15495 if (tp->snd_una != high_seq) {
15496 orig_snd_una = tp->snd_una;
15497 tp->snd_una = high_seq;
15498 xx = 1;
15499 } else
15500 xx = 0;
15501 TCP_LOG_EVENTP(tp, th,
15502 &tptosocket(tp)->so_rcv,
15503 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0,
15504 0, &log, true, &ltv);
15505 if (xx) {
15506 tp->snd_una = orig_snd_una;
15512 static void
15513 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts)
15515 uint32_t us_rtt;
15517 * A persist or keep-alive was forced out, update our
15518 * min rtt time. Note we now need to worry about lost responses.
15519 * When a subsequent keep-alive or persist times out
15520 * and forced_ack is still on, then the last probe
15521 * was not responded to. In such cases we have a
15522 * sysctl that controls the behavior. Either we apply
15523 * the rtt but with reduced confidence (0). Or we just
15524 * plain don't apply the rtt estimate. Having data flow
15525 * will clear the probe_not_answered flag, i.e. the cum-ack
15526 * moving forward <or> exiting and reentering persists.
15529 rack->forced_ack = 0;
15530 rack->rc_tp->t_rxtshift = 0;
15531 if ((rack->rc_in_persist &&
15532 (tiwin == rack->rc_tp->snd_wnd)) ||
15533 (rack->rc_in_persist == 0)) {
15535 * In persists only apply the RTT update if this is
15536 * a response to our window probe. And that
15537 * means the rwnd sent must match the current
15538 * snd_wnd. If it does not, then we got a
15539 * window update ack instead. For keepalive
15540 * we allow the answer no matter what the window is.
15542 * Note that if the probe_not_answered is set then
15543 * the forced_ack_ts is the oldest one i.e. the first
15544 * probe sent that might have been lost. This assures
15545 * us that if we do calculate an RTT it errs on the
15546 * long side rather than being some short value.
15548 if (rack->rc_in_persist)
15549 counter_u64_add(rack_persists_acks, 1);
15550 us_rtt = us_cts - rack->r_ctl.forced_ack_ts;
15551 if (us_rtt == 0)
15552 us_rtt = 1;
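/*
 * us_rtt is the time elapsed since the oldest unanswered probe was
 * sent (forced_ack_ts); it is clamped to 1 usec so a zero delta still
 * produces a valid sample below.
 */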
15553 if (rack->probe_not_answered == 0) {
15554 rack_apply_updated_usrtt(rack, us_rtt, us_cts);
15555 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1);
15556 } else {
15557 /* We have a retransmitted probe here too */
15558 if (rack_apply_rtt_with_reduced_conf) {
15559 rack_apply_updated_usrtt(rack, us_rtt, us_cts);
15560 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1);
15566 static void
15567 rack_new_round_starts(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq)
15570 * The next send has occurred; mark the end of the round
15571 * as when that data gets acknowledged. We can
15572 * also do common things we might need to do when
15573 * a round begins.
15575 rack->r_ctl.roundends = tp->snd_max;
15576 rack->rc_new_rnd_needed = 0;
15577 rack_log_hystart_event(rack, tp->snd_max, 4);
15581 static void
15582 rack_log_pcm(struct tcp_rack *rack, uint8_t mod, uint32_t flex1, uint32_t flex2,
15583 uint32_t flex3)
15585 if (tcp_bblogging_on(rack->rc_tp)) {
15586 union tcp_log_stackspecific log;
15587 struct timeval tv;
15589 (void)tcp_get_usecs(&tv);
15590 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
15591 log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv);
15592 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
15593 log.u_bbr.flex8 = mod;
15594 log.u_bbr.flex1 = flex1;
15595 log.u_bbr.flex2 = flex2;
15596 log.u_bbr.flex3 = flex3;
15597 log.u_bbr.flex4 = rack_pcm_every_n_rounds;
15598 log.u_bbr.flex5 = rack->r_ctl.pcm_idle_rounds;
15599 log.u_bbr.bbr_substate = rack->pcm_needed;
15600 log.u_bbr.bbr_substate <<= 1;
15601 log.u_bbr.bbr_substate |= rack->pcm_in_progress;
15602 log.u_bbr.bbr_substate <<= 1;
15603 log.u_bbr.bbr_substate |= rack->pcm_enabled; /* bits are NIE for Needed, Inprogress, Enabled */
15604 (void)tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_PCM_MEASURE, ERRNO_UNK,
15605 0, &log, false, NULL, NULL, 0, &tv);
15609 static void
15610 rack_new_round_setup(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq)
15613 * The round (current_round) has ended. We now
15614 * set up for the next round by incrementing the
15615 * round number and doing any round-specific
15616 * things.
15618 rack_log_hystart_event(rack, high_seq, 21);
15619 rack->r_ctl.current_round++;
15620 /* New round (current_round) begins at next send */
15621 rack->rc_new_rnd_needed = 1;
15622 if ((rack->pcm_enabled == 1) &&
15623 (rack->pcm_needed == 0) &&
15624 (rack->pcm_in_progress == 0)) {
15626 * If we have enabled PCM, then we need to
15627 * check if the round has advanced to the state
15628 * where one is required.
15630 int rnds;
15632 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round;
15633 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) {
15634 rack->pcm_needed = 1;
15635 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round );
15636 } else if (rack_verbose_logging) {
15637 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round );
15640 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) {
15641 /* We have hystart enabled send the round info in */
15642 if (CC_ALGO(tp)->newround != NULL) {
15643 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round);
15647 * For DGP an initial startup check. We want to validate
15648 * that we are not just pushing on slow-start and
15649 * not gaining, i.e. filling buffers without getting any
15650 * boost in b/w during the initial slow-start.
15652 if (rack->dgp_on &&
15653 (rack->rc_initial_ss_comp == 0) &&
15654 (tp->snd_cwnd < tp->snd_ssthresh) &&
15655 (rack->r_ctl.num_measurements >= RACK_REQ_AVG) &&
15656 (rack->r_ctl.gp_rnd_thresh > 0) &&
15657 ((rack->r_ctl.current_round - rack->r_ctl.last_rnd_of_gp_rise) >= rack->r_ctl.gp_rnd_thresh)) {
15660 * We are in the initial SS and we have had rack_rnd_cnt_req rounds (def:5) where
15661 * we have not gained the required amount in the gp_est (120.0% aka 1200). Let's
15662 * exit SS.
15664 * Pick up the flight size now as we enter slowstart (not the
15665 * cwnd which may be inflated).
15667 rack->rc_initial_ss_comp = 1;
15669 if (tcp_bblogging_on(rack->rc_tp)) {
15670 union tcp_log_stackspecific log;
15671 struct timeval tv;
15673 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
15674 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
15675 log.u_bbr.flex1 = rack->r_ctl.current_round;
15676 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise;
15677 log.u_bbr.flex3 = rack->r_ctl.gp_rnd_thresh;
15678 log.u_bbr.flex4 = rack->r_ctl.gate_to_fs;
15679 log.u_bbr.flex5 = rack->r_ctl.ss_hi_fs;
15680 log.u_bbr.flex8 = 40;
15681 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
15682 0, &log, false, NULL, __func__, __LINE__,&tv);
15684 if ((rack->r_ctl.gate_to_fs == 1) &&
15685 (tp->snd_cwnd > rack->r_ctl.ss_hi_fs)) {
15686 tp->snd_cwnd = rack->r_ctl.ss_hi_fs;
15688 tp->snd_ssthresh = tp->snd_cwnd - 1;
15689 /* Turn off any fast output running */
15690 rack->r_fast_output = 0;
15694 static int
15695 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv)
15698 * Handle a "special" compressed ack mbuf. Each incoming
15699 * ack has only four possible dispositions:
15701 * A) It moves the cum-ack forward
15702 * B) It is behind the cum-ack.
15703 * C) It is a window-update ack.
15704 * D) It is a dup-ack.
15706 * Note that we can have between 1 and TCP_COMP_ACK_ENTRIES
15707 * acks in the incoming mbuf. We also need to still pay attention
15708 * to nxt_pkt since there may be another packet after this
15709 * one.
15711 #ifdef TCP_ACCOUNTING
15712 uint64_t ts_val;
15713 uint64_t rdstc;
15714 #endif
15715 int segsiz;
15716 struct timespec ts;
15717 struct tcp_rack *rack;
15718 struct tcp_ackent *ae;
15719 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack;
15720 int cnt, i, did_out, ourfinisacked = 0;
15721 struct tcpopt to_holder, *to = NULL;
15722 #ifdef TCP_ACCOUNTING
15723 int win_up_req = 0;
15724 #endif
15725 int nsegs = 0;
15726 int under_pacing = 0;
15727 int post_recovery = 0;
15728 #ifdef TCP_ACCOUNTING
15729 sched_pin();
15730 #endif
15731 rack = (struct tcp_rack *)tp->t_fb_ptr;
15732 if (rack->gp_ready &&
15733 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT))
15734 under_pacing = 1;
15736 if (rack->r_state != tp->t_state)
15737 rack_set_state(tp, rack);
15738 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
15739 (tp->t_flags & TF_GPUTINPROG)) {
15741 * We have a goodput in progress
15742 * and we have entered a late state.
15743 * Do we have enough data in the sb
15744 * to handle the GPUT request?
15746 uint32_t bytes;
15748 bytes = tp->gput_ack - tp->gput_seq;
15749 if (SEQ_GT(tp->gput_seq, tp->snd_una))
15750 bytes += tp->gput_seq - tp->snd_una;
15751 if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
15753 * There are not enough bytes in the socket
15754 * buffer that have been sent to cover this
15755 * measurement. Cancel it.
15757 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
15758 rack->r_ctl.rc_gp_srtt /*flex1*/,
15759 tp->gput_seq,
15760 0, 0, 18, __LINE__, NULL, 0);
15761 tp->t_flags &= ~TF_GPUTINPROG;
15764 to = &to_holder;
15765 to->to_flags = 0;
15766 KASSERT((m->m_len >= sizeof(struct tcp_ackent)),
15767 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len));
15768 cnt = m->m_len / sizeof(struct tcp_ackent);
15769 counter_u64_add(rack_multi_single_eq, cnt);
15770 high_seq = tp->snd_una;
15771 the_win = tp->snd_wnd;
15772 win_seq = tp->snd_wl1;
15773 win_upd_ack = tp->snd_wl2;
15774 cts = tcp_tv_to_usectick(tv);
15775 ms_cts = tcp_tv_to_mssectick(tv);
15776 rack->r_ctl.rc_rcvtime = cts;
15777 segsiz = ctf_fixed_maxseg(tp);
15778 if ((rack->rc_gp_dyn_mul) &&
15779 (rack->use_fixed_rate == 0) &&
15780 (rack->rc_always_pace)) {
15781 /* Check in on probertt */
15782 rack_check_probe_rtt(rack, cts);
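/*
 * Walk every tcp_ackent packed into this mbuf, classify it into one of
 * the four cases described above (cum-ack, behind, window update or
 * dup-ack) and process it accordingly before moving to the next entry.
 */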
15784 for (i = 0; i < cnt; i++) {
15785 #ifdef TCP_ACCOUNTING
15786 ts_val = get_cyclecount();
15787 #endif
15788 rack_clear_rate_sample(rack);
15789 ae = ((mtod(m, struct tcp_ackent *)) + i);
15790 if (ae->flags & TH_FIN)
15791 rack_log_pacing_delay_calc(rack,
15795 rack_get_gp_est(rack), /* delRate */
15796 rack_get_lt_bw(rack), /* rttProp */
15797 20, __LINE__, NULL, 0);
15798 /* Setup the window */
15799 tiwin = ae->win << tp->snd_scale;
15800 if (tiwin > rack->r_ctl.rc_high_rwnd)
15801 rack->r_ctl.rc_high_rwnd = tiwin;
15802 /* figure out the type of ack */
15803 if (SEQ_LT(ae->ack, high_seq)) {
15804 /* Case B*/
15805 ae->ack_val_set = ACK_BEHIND;
15806 } else if (SEQ_GT(ae->ack, high_seq)) {
15807 /* Case A */
15808 ae->ack_val_set = ACK_CUMACK;
15809 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){
15810 /* Case D */
15811 ae->ack_val_set = ACK_DUPACK;
15812 } else {
15813 /* Case C */
15814 ae->ack_val_set = ACK_RWND;
15816 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__);
15817 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq);
15818 /* Validate timestamp */
15819 if (ae->flags & HAS_TSTMP) {
15820 /* Setup for a timestamp */
15821 to->to_flags = TOF_TS;
15822 ae->ts_echo -= tp->ts_offset;
15823 to->to_tsecr = ae->ts_echo;
15824 to->to_tsval = ae->ts_value;
15826 * If echoed timestamp is later than the current time, fall back to
15827 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
15828 * were used when this connection was established.
15830 if (TSTMP_GT(ae->ts_echo, ms_cts))
15831 to->to_tsecr = 0;
15832 if (tp->ts_recent &&
15833 TSTMP_LT(ae->ts_value, tp->ts_recent)) {
15834 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) {
15835 #ifdef TCP_ACCOUNTING
15836 rdstc = get_cyclecount();
15837 if (rdstc > ts_val) {
15838 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15839 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
15842 #endif
15843 continue;
15846 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) &&
15847 SEQ_LEQ(tp->last_ack_sent, ae->seq)) {
15848 tp->ts_recent_age = tcp_ts_getticks();
15849 tp->ts_recent = ae->ts_value;
15851 } else {
15852 /* Setup for a no options */
15853 to->to_flags = 0;
15855 /* Update the rcv time and perform idle reduction possibly */
15856 if (tp->t_idle_reduce &&
15857 (tp->snd_max == tp->snd_una) &&
15858 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
15859 counter_u64_add(rack_input_idle_reduces, 1);
15860 rack_cc_after_idle(rack, tp);
15862 tp->t_rcvtime = ticks;
15863 /* Now what about ECN of a chain of pure ACKs? */
15864 if (tcp_ecn_input_segment(tp, ae->flags, 0,
15865 tcp_packets_this_ack(tp, ae->ack),
15866 ae->codepoint))
15867 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__);
15868 #ifdef TCP_ACCOUNTING
15869 /* Account for the specific type of ack coming in */
15870 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15871 tp->tcp_cnt_counters[ae->ack_val_set]++;
15873 #endif
15875 * Note how we could move these up into the determination
15876 * above, but we don't, so that the timestamp checks (and ECN)
15877 * are done first before we do any processing on the ACK.
15878 * The non-compressed path through the code has this
15879 * weakness (noted by @jtl) that it actually does some
15880 * processing before verifying the timestamp information.
15881 * We don't take that path here which is why we set
15882 * the ack_val_set first, do the timestamp and ecn
15883 * processing, and then look at what we have setup.
15885 if (ae->ack_val_set == ACK_BEHIND) {
15887 * Case B: flag reordering, if the window is not closed;
15888 * or it could be a keep-alive or persist response.
15890 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) {
15891 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
15892 if (rack->r_ctl.rc_reorder_ts == 0)
15893 rack->r_ctl.rc_reorder_ts = 1;
15895 } else if (ae->ack_val_set == ACK_DUPACK) {
15896 /* Case D */
15897 rack_strike_dupack(rack, ae->ack);
15898 } else if (ae->ack_val_set == ACK_RWND) {
15899 /* Case C */
15900 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
15901 ts.tv_sec = ae->timestamp / 1000000000;
15902 ts.tv_nsec = ae->timestamp % 1000000000;
15903 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
15904 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
15905 } else {
15906 rack->r_ctl.act_rcv_time = *tv;
15908 if (rack->forced_ack) {
15909 rack_handle_probe_response(rack, tiwin,
15910 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
15912 #ifdef TCP_ACCOUNTING
15913 win_up_req = 1;
15914 #endif
15915 win_upd_ack = ae->ack;
15916 win_seq = ae->seq;
15917 the_win = tiwin;
15918 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts);
15919 } else {
15920 /* Case A */
15921 if (SEQ_GT(ae->ack, tp->snd_max)) {
15923 * We just send an ack since the incoming
15924 * ack is beyond the largest seq we sent.
15926 if ((tp->t_flags & TF_ACKNOW) == 0) {
15927 ctf_ack_war_checks(tp);
15928 if (tp->t_flags & TF_ACKNOW)
15929 rack->r_wanted_output = 1;
15931 } else {
15932 nsegs++;
15933 /* If the window changed setup to update */
15934 if (tiwin != tp->snd_wnd) {
15935 win_upd_ack = ae->ack;
15936 win_seq = ae->seq;
15937 the_win = tiwin;
15938 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts);
15940 #ifdef TCP_ACCOUNTING
15941 /* Account for the acks */
15942 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15943 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz);
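/*
 * Ceiling-divide the newly acked bytes by the segment size to
 * estimate how many individual acks this compressed entry stands for.
 */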
15945 #endif
15946 high_seq = ae->ack;
15947 /* Setup our act_rcv_time */
15948 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
15949 ts.tv_sec = ae->timestamp / 1000000000;
15950 ts.tv_nsec = ae->timestamp % 1000000000;
15951 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
15952 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
15953 } else {
15954 rack->r_ctl.act_rcv_time = *tv;
15956 rack_process_to_cumack(tp, rack, ae->ack, cts, to,
15957 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time));
15958 #ifdef TCP_REQUEST_TRK
15959 rack_req_check_for_comp(rack, high_seq);
15960 #endif
15961 if (rack->rc_dsack_round_seen) {
15962 /* Is the dsack round over? */
15963 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) {
15964 /* Yes it is */
15965 rack->rc_dsack_round_seen = 0;
15966 rack_log_dsack_event(rack, 3, __LINE__, 0, 0);
15971 /* And let's be sure to commit the rtt measurements for this ack */
15972 tcp_rack_xmit_timer_commit(rack, tp);
15973 #ifdef TCP_ACCOUNTING
15974 rdstc = get_cyclecount();
15975 if (rdstc > ts_val) {
15976 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15977 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
15978 if (ae->ack_val_set == ACK_CUMACK)
15979 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val);
15982 #endif
15984 #ifdef TCP_ACCOUNTING
15985 ts_val = get_cyclecount();
15986 #endif
15987 /* Tend to any collapsed window */
15988 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) {
15989 /* The peer collapsed the window */
15990 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__);
15991 } else if (rack->rc_has_collapsed)
15992 rack_un_collapse_window(rack, __LINE__);
15993 if ((rack->r_collapse_point_valid) &&
15994 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point)))
15995 rack->r_collapse_point_valid = 0;
15996 acked_amount = acked = (high_seq - tp->snd_una);
15997 if (acked) {
15999 * The draft (v3) calls for us to use SEQ_GEQ, but that
16000 * causes issues when we are just going app limited. Let's
16001 * instead use SEQ_GT <or> where it is equal but more data
16002 * is outstanding.
16004 * Also make sure we are on the last ack of a series. We
16005 * have to have all the acks processed in queue to know
16006 * if there is something left outstanding.
16009 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) &&
16010 (rack->rc_new_rnd_needed == 0) &&
16011 (nxt_pkt == 0)) {
16013 * We have crossed into a new round with
16014 * this th_ack value.
16016 rack_new_round_setup(tp, rack, high_seq);
16019 * Clear the probe not answered flag
16020 * since cum-ack moved forward.
16022 rack->probe_not_answered = 0;
16023 if (tp->t_flags & TF_NEEDSYN) {
16025 * T/TCP: Connection was half-synchronized, and our SYN has
16026 * been ACK'd (so connection is now fully synchronized). Go
16027 * to non-starred state, increment snd_una for ACK of SYN,
16028 * and check if we can do window scaling.
16030 tp->t_flags &= ~TF_NEEDSYN;
16031 tp->snd_una++;
16032 acked_amount = acked = (high_seq - tp->snd_una);
16034 if (acked > sbavail(&so->so_snd))
16035 acked_amount = sbavail(&so->so_snd);
16036 if (IN_FASTRECOVERY(tp->t_flags) &&
16037 (rack->rack_no_prr == 0))
16038 rack_update_prr(tp, rack, acked_amount, high_seq);
16039 if (IN_RECOVERY(tp->t_flags)) {
16040 if (SEQ_LT(high_seq, tp->snd_recover) &&
16041 (SEQ_LT(high_seq, tp->snd_max))) {
16042 tcp_rack_partialack(tp);
16043 } else {
16044 rack_post_recovery(tp, high_seq);
16045 post_recovery = 1;
16047 } else if ((rack->rto_from_rec == 1) &&
16048 SEQ_GEQ(high_seq, tp->snd_recover)) {
16050 * We were in recovery, hit a rxt timeout
16051 * and never re-entered recovery. The timeout(s)
16052 * made up all the lost data. In such a case
16053 * we need to clear the rto_from_rec flag.
16055 rack->rto_from_rec = 0;
16057 /* Handle the rack-log-ack part (sendmap) */
16058 if ((sbused(&so->so_snd) == 0) &&
16059 (acked > acked_amount) &&
16060 (tp->t_state >= TCPS_FIN_WAIT_1) &&
16061 (tp->t_flags & TF_SENTFIN)) {
16063 * We must be sure our fin
16064 * was sent and acked (we can be
16065 * in FIN_WAIT_1 without having
16066 * sent the fin).
16068 ourfinisacked = 1;
16070 * Let's make sure snd_una is updated
16071 * since most likely acked_amount = 0 (it
16072 * should be).
16074 tp->snd_una = high_seq;
16076 /* Did we make a RTO error? */
16077 if ((tp->t_flags & TF_PREVVALID) &&
16078 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
16079 tp->t_flags &= ~TF_PREVVALID;
16080 if (tp->t_rxtshift == 1 &&
16081 (int)(ticks - tp->t_badrxtwin) < 0)
16082 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__);
16084 /* Handle the data in the socket buffer */
16085 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1);
16086 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
16087 if (acked_amount > 0) {
16088 uint32_t p_cwnd;
16089 struct mbuf *mfree;
16091 if (post_recovery) {
16093 * Grab the segsiz, multiply by 2 and add the snd_cwnd;
16094 * that is the max the CC should take us to if we are exiting
16095 * recovery and doing a late add.
16097 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
16098 p_cwnd <<= 1;
16099 p_cwnd += tp->snd_cwnd;
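/*
 * Rough example with hypothetical numbers: assuming a 1448-byte segsiz
 * and rc_pace_min_segs no smaller than that, p_cwnd ends up at
 * snd_cwnd + 2896, i.e. the CC module may grow cwnd by at most two
 * segments on this exiting-recovery ack before the cap below claws it
 * back.
 */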
16101 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, post_recovery);
16102 if (post_recovery && (tp->snd_cwnd > p_cwnd)) {
16103 /* Must be non-newreno (cubic) getting too far ahead of itself */
16104 tp->snd_cwnd = p_cwnd;
16106 SOCK_SENDBUF_LOCK(so);
16107 mfree = sbcut_locked(&so->so_snd, acked_amount);
16108 tp->snd_una = high_seq;
16109 /* Note we want to hold the sb lock through the sendmap adjust */
16110 rack_adjust_sendmap_head(rack, &so->so_snd);
16111 /* Wake up the socket if we have room to write more */
16112 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
16113 sowwakeup_locked(so);
16114 m_freem(mfree);
16116 /* update progress */
16117 tp->t_acktime = ticks;
16118 rack_log_progress_event(rack, tp, tp->t_acktime,
16119 PROGRESS_UPDATE, __LINE__);
16120 /* Clear out shifts and such */
16121 tp->t_rxtshift = 0;
16122 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
16123 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
16124 rack->rc_tlp_in_progress = 0;
16125 rack->r_ctl.rc_tlp_cnt_out = 0;
16126 /* snd_recover and snd_nxt must be dragged along */
16127 if (SEQ_GT(tp->snd_una, tp->snd_recover))
16128 tp->snd_recover = tp->snd_una;
16129 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
16130 tp->snd_nxt = tp->snd_max;
16132 * If the RXT timer is running we want to
16133 * stop it, so we can restart a TLP (or new RXT).
16135 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
16136 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
16137 tp->snd_wl2 = high_seq;
16138 tp->t_dupacks = 0;
16139 if (under_pacing &&
16140 (rack->use_fixed_rate == 0) &&
16141 (rack->in_probe_rtt == 0) &&
16142 rack->rc_gp_dyn_mul &&
16143 rack->rc_always_pace) {
16144 /* Check if we are dragging bottom */
16145 rack_check_bottom_drag(tp, rack, so);
16147 if (tp->snd_una == tp->snd_max) {
16148 tp->t_flags &= ~TF_PREVVALID;
16149 rack->r_ctl.retran_during_recovery = 0;
16150 rack->rc_suspicious = 0;
16151 rack->r_ctl.dsack_byte_cnt = 0;
16152 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
16153 if (rack->r_ctl.rc_went_idle_time == 0)
16154 rack->r_ctl.rc_went_idle_time = 1;
16155 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
16156 if (sbavail(&tptosocket(tp)->so_snd) == 0)
16157 tp->t_acktime = 0;
16158 /* Set so we might enter persists... */
16159 rack->r_wanted_output = 1;
16160 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
16161 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
16162 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
16163 (sbavail(&so->so_snd) == 0) &&
16164 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
16166 * The socket was gone and the
16167 * peer sent data (not now, in the past); time to
16168 * reset the connection.
16170 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
16171 /* tcp_close will kill the inp pre-log the Reset */
16172 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
16173 #ifdef TCP_ACCOUNTING
16174 rdstc = get_cyclecount();
16175 if (rdstc > ts_val) {
16176 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16177 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
16178 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
16181 #endif
16182 m_freem(m);
16183 tp = tcp_close(tp);
16184 if (tp == NULL) {
16185 #ifdef TCP_ACCOUNTING
16186 sched_unpin();
16187 #endif
16188 return (1);
16191 * We would normally do drop-with-reset which would
16192 * send back a reset. We can't since we don't have
16193 * all the needed bits. Instead let's arrange for
16194 * a call to tcp_output(). That way, since we
16195 * are in the closed state, we will generate a reset.
16197 * Note if tcp_accounting is on we don't unpin since
16198 * we do that after the goto label.
16200 goto send_out_a_rst;
16202 if ((sbused(&so->so_snd) == 0) &&
16203 (tp->t_state >= TCPS_FIN_WAIT_1) &&
16204 (tp->t_flags & TF_SENTFIN)) {
16206 * If we can't receive any more data, then the closing user can
16207 * proceed. Starting the timer is contrary to the
16208 * specification, but if we don't get a FIN we'll hang
16209 * forever.
16212 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
16213 soisdisconnected(so);
16214 tcp_timer_activate(tp, TT_2MSL,
16215 (tcp_fast_finwait2_recycle ?
16216 tcp_finwait2_timeout :
16217 TP_MAXIDLE(tp)));
16219 if (ourfinisacked == 0) {
16221 * We don't change to fin-wait-2 if we have our fin acked
16222 * which means we are probably in TCPS_CLOSING.
16224 tcp_state_change(tp, TCPS_FIN_WAIT_2);
16228 /* Wake up the socket if we have room to write more */
16229 if (sbavail(&so->so_snd)) {
16230 rack->r_wanted_output = 1;
16231 if (ctf_progress_timeout_check(tp, true)) {
16232 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
16233 tp, tick, PROGRESS_DROP, __LINE__);
16235 * We cheat here and don't send a RST, we should send one
16236 * when the pacer drops the connection.
16238 #ifdef TCP_ACCOUNTING
16239 rdstc = get_cyclecount();
16240 if (rdstc > ts_val) {
16241 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16242 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
16243 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
16246 sched_unpin();
16247 #endif
16248 (void)tcp_drop(tp, ETIMEDOUT);
16249 m_freem(m);
16250 return (1);
16253 if (ourfinisacked) {
16254 switch(tp->t_state) {
16255 case TCPS_CLOSING:
16256 #ifdef TCP_ACCOUNTING
16257 rdstc = get_cyclecount();
16258 if (rdstc > ts_val) {
16259 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16260 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
16261 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
16264 sched_unpin();
16265 #endif
16266 tcp_twstart(tp);
16267 m_freem(m);
16268 return (1);
16269 break;
16270 case TCPS_LAST_ACK:
16271 #ifdef TCP_ACCOUNTING
16272 rdstc = get_cyclecount();
16273 if (rdstc > ts_val) {
16274 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16275 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
16276 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
16279 sched_unpin();
16280 #endif
16281 tp = tcp_close(tp);
16282 ctf_do_drop(m, tp);
16283 return (1);
16284 break;
16285 case TCPS_FIN_WAIT_1:
16286 #ifdef TCP_ACCOUNTING
16287 rdstc = get_cyclecount();
16288 if (rdstc > ts_val) {
16289 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16290 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
16291 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
16294 #endif
16295 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
16296 soisdisconnected(so);
16297 tcp_timer_activate(tp, TT_2MSL,
16298 (tcp_fast_finwait2_recycle ?
16299 tcp_finwait2_timeout :
16300 TP_MAXIDLE(tp)));
16302 tcp_state_change(tp, TCPS_FIN_WAIT_2);
16303 break;
16304 default:
16305 break;
16308 if (rack->r_fast_output) {
16310 * We are doing fast output... can we expand that?
16312 rack_gain_for_fastoutput(rack, tp, so, acked_amount);
16314 #ifdef TCP_ACCOUNTING
16315 rdstc = get_cyclecount();
16316 if (rdstc > ts_val) {
16317 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16318 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
16319 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
16323 } else if (win_up_req) {
16324 rdstc = get_cyclecount();
16325 if (rdstc > ts_val) {
16326 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16327 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val);
16330 #endif
16332 /* Now is there a next packet, if so we are done */
16333 m_freem(m);
16334 did_out = 0;
16335 if (nxt_pkt) {
16336 #ifdef TCP_ACCOUNTING
16337 sched_unpin();
16338 #endif
16339 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs);
16340 return (0);
16342 rack_handle_might_revert(tp, rack);
16343 ctf_calc_rwin(so, tp);
16344 if ((rack->r_wanted_output != 0) ||
16345 (rack->r_fast_output != 0) ||
16346 (tp->t_flags & TF_ACKNOW )) {
16347 send_out_a_rst:
16348 if (tcp_output(tp) < 0) {
16349 #ifdef TCP_ACCOUNTING
16350 sched_unpin();
16351 #endif
16352 return (1);
16354 did_out = 1;
16356 if (tp->t_flags2 & TF2_HPTS_CALLS)
16357 tp->t_flags2 &= ~TF2_HPTS_CALLS;
16358 rack_free_trim(rack);
16359 #ifdef TCP_ACCOUNTING
16360 sched_unpin();
16361 #endif
16362 rack_timer_audit(tp, rack, &so->so_snd);
16363 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs);
16364 return (0);
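/*
 * TCP_LRO_TS_OPTION below is the 32-bit pattern NOP, NOP, TIMESTAMP,
 * length 10 as it appears on the wire; it lets the fast check in
 * rack_do_segment_nounlock() recognize a segment whose only option is
 * the standard timestamp block, the same match LRO itself performs.
 */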
16367 #define TCP_LRO_TS_OPTION \
16368 ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
16369 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
16371 static int
16372 rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
16373 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt,
16374 struct timeval *tv)
16376 struct inpcb *inp = tptoinpcb(tp);
16377 struct socket *so = tptosocket(tp);
16378 #ifdef TCP_ACCOUNTING
16379 uint64_t ts_val;
16380 #endif
16381 int32_t thflags, retval, did_out = 0;
16382 int32_t way_out = 0;
16384 * cts - is the current time from tv (caller gets ts) in microseconds.
16385 * ms_cts - is the current time from tv in milliseconds.
16386 * us_cts - is the time that LRO or hardware actually got the packet in microseconds.
16388 uint32_t cts, us_cts, ms_cts;
16389 uint32_t tiwin;
16390 struct timespec ts;
16391 struct tcpopt to;
16392 struct tcp_rack *rack;
16393 struct rack_sendmap *rsm;
16394 int32_t prev_state = 0;
16395 int no_output = 0;
16396 int slot_remaining = 0;
16397 #ifdef TCP_ACCOUNTING
16398 int ack_val_set = 0xf;
16399 #endif
16400 int nsegs;
16402 NET_EPOCH_ASSERT();
16403 INP_WLOCK_ASSERT(inp);
16406 * tv passed from common code is from either M_TSTMP_LRO or
16407 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present.
16409 rack = (struct tcp_rack *)tp->t_fb_ptr;
16410 if (rack->rack_deferred_inited == 0) {
16412 * If we are the connecting socket we will
16413 * hit rack_init() when no sequence numbers
16414 * are set up. This means we must defer
16415 * some initialization. Call that now.
16417 rack_deferred_init(tp, rack);
16420 * Check to see if we need to skip any output plans. This
16421 * can happen in the non-LRO path where we are pacing and
16422 * must process the ack coming in but need to defer sending
16423 * anything because a pacing timer is running.
16425 us_cts = tcp_tv_to_usectick(tv);
16426 if (m->m_flags & M_ACKCMP) {
16428 * All compressed acks are acks by definition, so
16429 * remove any ack required flag and then do the processing.
16431 rack->rc_ack_required = 0;
16432 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv));
16434 thflags = tcp_get_flags(th);
16435 if ((rack->rc_always_pace == 1) &&
16436 (rack->rc_ack_can_sendout_data == 0) &&
16437 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
16438 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) {
16440 * Ok, conditions are right for queuing the packets,
16441 * but we do have to check the flags in the inp: it
16442 * could be that, if a sack is present, we want to be awoken and
16443 * so should process the packets.
16445 slot_remaining = rack->r_ctl.rc_last_output_to - us_cts;
16446 if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) {
16447 no_output = 1;
16448 } else {
16450 * If there are no options, or just a
16451 * timestamp option, we will want to queue
16452 * the packets. This is the same check that LRO does
16453 * and will need to change with accurate ECN.
16455 uint32_t *ts_ptr;
16456 int optlen;
16458 optlen = (th->th_off << 2) - sizeof(struct tcphdr);
16459 ts_ptr = (uint32_t *)(th + 1);
16460 if ((optlen == 0) ||
16461 ((optlen == TCPOLEN_TSTAMP_APPA) &&
16462 (*ts_ptr == TCP_LRO_TS_OPTION)))
16463 no_output = 1;
16465 if ((no_output == 1) && (slot_remaining < tcp_min_hptsi_time)) {
16467 * It is unrealistic to think we can pace in less than
16468 * the minimum granularity of the pacer (def:250usec). So
16469 * if we have less than that time remaining we should go
16470 * ahead and allow output to be "early". We will attempt to
16471 * make up for it in any pacing time we try to apply on
16472 * the outbound packet.
16474 no_output = 0;
16478 * If there is a RST or FIN let's dump out the bw;
16479 * with a FIN the connection may go on but we
16480 * may not.
16482 if ((thflags & TH_FIN) || (thflags & TH_RST))
16483 rack_log_pacing_delay_calc(rack,
16484 rack->r_ctl.gp_bw,
16487 rack_get_gp_est(rack), /* delRate */
16488 rack_get_lt_bw(rack), /* rttProp */
16489 20, __LINE__, NULL, 0);
16490 if (m->m_flags & M_ACKCMP) {
16491 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp);
16493 cts = tcp_tv_to_usectick(tv);
16494 ms_cts = tcp_tv_to_mssectick(tv);
16495 nsegs = m->m_pkthdr.lro_nsegs;
16496 counter_u64_add(rack_proc_non_comp_ack, 1);
16497 #ifdef TCP_ACCOUNTING
16498 sched_pin();
16499 if (thflags & TH_ACK)
16500 ts_val = get_cyclecount();
16501 #endif
16502 if ((m->m_flags & M_TSTMP) ||
16503 (m->m_flags & M_TSTMP_LRO)) {
16504 mbuf_tstmp2timespec(m, &ts);
16505 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
16506 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
16507 } else
16508 rack->r_ctl.act_rcv_time = *tv;
16509 kern_prefetch(rack, &prev_state);
16510 prev_state = 0;
16512 * Unscale the window into a 32-bit value. For the SYN_SENT state
16513 * the scale is zero.
16515 tiwin = th->th_win << tp->snd_scale;
16516 #ifdef TCP_ACCOUNTING
16517 if (thflags & TH_ACK) {
16519 * We have a tradeoff here. We can either do what we are
16520 * doing i.e. pinning to this CPU and then doing the accounting
16521 * <or> we could do a critical enter, setup the rdtsc and cpu
16522 * as in below, and then validate we are on the same CPU on
16523 * exit. I have chosen to not do the critical enter since
16524 * that often will gain you a context switch, and instead lock
16525 * us (line above this if) to the same CPU with sched_pin(). This
16526 * means we may be context switched out for a higher priority
16527 * interrupt but we won't be moved to another CPU.
16529 * If this occurs (which it won't very often since we most likely
16530 * are running this code in interrupt context and only a higher
16531 * priority will bump us ... clock?) we will falsely add the
16532 * interrupt processing time into the measured ack processing
16533 * time. This is ok since it's a rare event.
16535 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin,
16536 ctf_fixed_maxseg(tp));
16538 #endif
16540 * Parse options on any incoming segment.
16542 memset(&to, 0, sizeof(to));
16543 tcp_dooptions(&to, (u_char *)(th + 1),
16544 (th->th_off << 2) - sizeof(struct tcphdr),
16545 (thflags & TH_SYN) ? TO_SYN : 0);
16546 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
16547 __func__));
16548 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
16549 __func__));
16550 if (tp->t_flags2 & TF2_PROC_SACK_PROHIBIT) {
16552 * We don't look at sacks from the
16553 * peer because the MSS is too small, which
16554 * can subject us to an attack.
16556 to.to_flags &= ~TOF_SACK;
16558 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
16559 (tp->t_flags & TF_GPUTINPROG)) {
16561 * We have a goodput in progress
16562 * and we have entered a late state.
16563 * Do we have enough data in the sb
16564 * to handle the GPUT request?
16566 uint32_t bytes;
16568 bytes = tp->gput_ack - tp->gput_seq;
16569 if (SEQ_GT(tp->gput_seq, tp->snd_una))
16570 bytes += tp->gput_seq - tp->snd_una;
16571 if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
16573 * There are not enough bytes in the socket
16574 * buffer that have been sent to cover this
16575 * measurement. Cancel it.
16577 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
16578 rack->r_ctl.rc_gp_srtt /*flex1*/,
16579 tp->gput_seq,
16580 0, 0, 18, __LINE__, NULL, 0);
16581 tp->t_flags &= ~TF_GPUTINPROG;
16584 if (tcp_bblogging_on(rack->rc_tp)) {
16585 union tcp_log_stackspecific log;
16586 struct timeval ltv;
16587 #ifdef TCP_REQUEST_TRK
16588 struct tcp_sendfile_track *tcp_req;
16590 if (SEQ_GT(th->th_ack, tp->snd_una)) {
16591 tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1));
16592 } else {
16593 tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack);
16595 #endif
16596 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
16597 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
16598 if (rack->rack_no_prr == 0)
16599 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
16600 else
16601 log.u_bbr.flex1 = 0;
16602 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
16603 log.u_bbr.use_lt_bw <<= 1;
16604 log.u_bbr.use_lt_bw |= rack->r_might_revert;
16605 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
16606 log.u_bbr.bbr_state = rack->rc_free_cnt;
16607 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
16608 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
16609 log.u_bbr.flex3 = m->m_flags;
16610 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
16611 log.u_bbr.lost = thflags;
16612 log.u_bbr.pacing_gain = 0x1;
16613 #ifdef TCP_ACCOUNTING
16614 log.u_bbr.cwnd_gain = ack_val_set;
16615 #endif
16616 log.u_bbr.flex7 = 2;
16617 if (m->m_flags & M_TSTMP) {
16618 /* Record the hardware timestamp if present */
16619 mbuf_tstmp2timespec(m, &ts);
16620 ltv.tv_sec = ts.tv_sec;
16621 ltv.tv_usec = ts.tv_nsec / 1000;
16622 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
16623 } else if (m->m_flags & M_TSTMP_LRO) {
16624 /* Record the LRO arrival timestamp */
16625 mbuf_tstmp2timespec(m, &ts);
16626 ltv.tv_sec = ts.tv_sec;
16627 ltv.tv_usec = ts.tv_nsec / 1000;
16628 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
16630 log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
16631 /* Log the rcv time */
16632 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
16633 #ifdef TCP_REQUEST_TRK
16634 log.u_bbr.applimited = tp->t_tcpreq_closed;
16635 log.u_bbr.applimited <<= 8;
16636 log.u_bbr.applimited |= tp->t_tcpreq_open;
16637 log.u_bbr.applimited <<= 8;
16638 log.u_bbr.applimited |= tp->t_tcpreq_req;
16639 if (tcp_req) {
16640 /* Copy out any client req info */
16641 /* seconds */
16642 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC);
16643 /* useconds */
16644 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC);
16645 log.u_bbr.rttProp = tcp_req->timestamp;
16646 log.u_bbr.cur_del_rate = tcp_req->start;
16647 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) {
16648 log.u_bbr.flex8 |= 1;
16649 } else {
16650 log.u_bbr.flex8 |= 2;
16651 log.u_bbr.bw_inuse = tcp_req->end;
16653 log.u_bbr.flex6 = tcp_req->start_seq;
16654 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) {
16655 log.u_bbr.flex8 |= 4;
16656 log.u_bbr.epoch = tcp_req->end_seq;
16659 #endif
16660 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
16661 tlen, &log, true, &ltv);
16663 /* Remove ack required flag if set, we have one */
16664 if (thflags & TH_ACK)
16665 rack->rc_ack_required = 0;
16666 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__);
16667 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
16668 way_out = 4;
16669 retval = 0;
16670 m_freem(m);
16671 goto done_with_input;
16674 * If a segment with the ACK-bit set arrives in the SYN-SENT state
16675 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
16677 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
16678 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
16679 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
16680 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
16681 #ifdef TCP_ACCOUNTING
16682 sched_unpin();
16683 #endif
16684 return (1);
16687 * If timestamps were negotiated during SYN/ACK and a
16688 * segment without a timestamp is received, silently drop
16689 * the segment, unless it is a RST segment or missing timestamps are
16690 * tolerated.
16691 * See section 3.2 of RFC 7323.
16693 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) &&
16694 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) {
16695 way_out = 5;
16696 retval = 0;
16697 m_freem(m);
16698 goto done_with_input;
16701 * Segment received on connection. Reset idle time and keep-alive
16702 * timer. XXX: This should be done after segment validation to
16703 * ignore broken/spoofed segs.
16705 if (tp->t_idle_reduce &&
16706 (tp->snd_max == tp->snd_una) &&
16707 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
16708 counter_u64_add(rack_input_idle_reduces, 1);
16709 rack_cc_after_idle(rack, tp);
16711 tp->t_rcvtime = ticks;
16712 #ifdef STATS
16713 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
16714 #endif
16715 if (tiwin > rack->r_ctl.rc_high_rwnd)
16716 rack->r_ctl.rc_high_rwnd = tiwin;
16718 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move
16719 * this to occur after we've validated the segment.
16721 if (tcp_ecn_input_segment(tp, thflags, tlen,
16722 tcp_packets_this_ack(tp, th->th_ack),
16723 iptos))
16724 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__);
16727 * If echoed timestamp is later than the current time, fall back to
16728 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
16729 * were used when this connection was established.
16731 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
16732 to.to_tsecr -= tp->ts_offset;
16733 if (TSTMP_GT(to.to_tsecr, ms_cts))
16734 to.to_tsecr = 0;
16736 if ((rack->r_rcvpath_rtt_up == 1) &&
16737 (to.to_flags & TOF_TS) &&
16738 (TSTMP_GEQ(to.to_tsecr, rack->r_ctl.last_rcv_tstmp_for_rtt))) {
16739 uint32_t rtt = 0;
16742 * We are receiving only and thus not sending
16743 * data to do an RTT. We set a flag when we first
16744 * sent this TS to the peer. We now have it back
16745 * and have an RTT to share. We log it as a conf
16746 * 4; we are not so sure about it, since we
16747 * may have lost an ack.
16749 if (TSTMP_GT(cts, rack->r_ctl.last_time_of_arm_rcv))
16750 rtt = (cts - rack->r_ctl.last_time_of_arm_rcv);
16751 rack->r_rcvpath_rtt_up = 0;
16752 /* Submit and commit the timer */
16753 if (rtt > 0) {
16754 tcp_rack_xmit_timer(rack, rtt, 0, rtt, 4, NULL, 1);
16755 tcp_rack_xmit_timer_commit(rack, tp);
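/*
 * For illustration only (made-up numbers, not from a real trace): if
 * the timestamp we armed was sent when last_time_of_arm_rcv was
 * 5,000,000 usec and its echo returns at cts == 5,012,400 usec, then
 *
 *	rtt = 5,012,400 - 5,000,000 = 12,400 usec
 *
 * which is fed to tcp_rack_xmit_timer() as a confidence-4 sample,
 * since a lost or delayed ack could have inflated it.
 */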
16759 * If its the first time in we need to take care of options and
16760 * verify we can do SACK for rack!
16762 if (rack->r_state == 0) {
16763 /* Should be init'd by rack_init() */
16764 KASSERT(rack->rc_inp != NULL,
16765 ("%s: rack->rc_inp unexpectedly NULL", __func__));
16766 if (rack->rc_inp == NULL) {
16767 rack->rc_inp = inp;
16771 * Process options only when we get SYN/ACK back. The SYN
16772 * case for incoming connections is handled in tcp_syncache.
16773 * According to RFC1323 the window field in a SYN (i.e., a
16774 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX
16775 * this is traditional behavior, may need to be cleaned up.
16777 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
16778 /* Handle parallel SYN for ECN */
16779 tcp_ecn_input_parallel_syn(tp, thflags, iptos);
16780 if ((to.to_flags & TOF_SCALE) &&
16781 (tp->t_flags & TF_REQ_SCALE)) {
16782 tp->t_flags |= TF_RCVD_SCALE;
16783 tp->snd_scale = to.to_wscale;
16784 } else
16785 tp->t_flags &= ~TF_REQ_SCALE;
16787 * Initial send window. It will be updated with the
16788 * next incoming segment to the scaled value.
16790 tp->snd_wnd = th->th_win;
16791 rack_validate_fo_sendwin_up(tp, rack);
16792 if ((to.to_flags & TOF_TS) &&
16793 (tp->t_flags & TF_REQ_TSTMP)) {
16794 tp->t_flags |= TF_RCVD_TSTMP;
16795 tp->ts_recent = to.to_tsval;
16796 tp->ts_recent_age = cts;
16797 } else
16798 tp->t_flags &= ~TF_REQ_TSTMP;
16799 if (to.to_flags & TOF_MSS) {
16800 tcp_mss(tp, to.to_mss);
16802 if ((tp->t_flags & TF_SACK_PERMIT) &&
16803 (to.to_flags & TOF_SACKPERM) == 0)
16804 tp->t_flags &= ~TF_SACK_PERMIT;
16805 if (tp->t_flags & TF_FASTOPEN) {
16806 if (to.to_flags & TOF_FASTOPEN) {
16807 uint16_t mss;
16809 if (to.to_flags & TOF_MSS)
16810 mss = to.to_mss;
16811 else
16812 if ((inp->inp_vflag & INP_IPV6) != 0)
16813 mss = TCP6_MSS;
16814 else
16815 mss = TCP_MSS;
16816 tcp_fastopen_update_cache(tp, mss,
16817 to.to_tfo_len, to.to_tfo_cookie);
16818 } else
16819 tcp_fastopen_disable_path(tp);
16823 * At this point we are at the initial call. Here we decide
16824 * if we are doing RACK or not. We do this by seeing if
16825 * TF_SACK_PERMIT is set and the sack-not-required is clear.
16826 * The code now does do dup-ack counting so if you don't
16827 * switch back you won't get rack & TLP, but you will still
16828 * get this stack.
16831 if ((rack_sack_not_required == 0) &&
16832 ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
16833 tcp_switch_back_to_default(tp);
16834 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen,
16835 tlen, iptos);
16836 #ifdef TCP_ACCOUNTING
16837 sched_unpin();
16838 #endif
16839 return (1);
16841 tcp_set_hpts(tp);
16842 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
16844 if (thflags & TH_FIN)
16845 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
16846 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
16847 if ((rack->rc_gp_dyn_mul) &&
16848 (rack->use_fixed_rate == 0) &&
16849 (rack->rc_always_pace)) {
16850 /* Check in on probertt */
16851 rack_check_probe_rtt(rack, cts);
16853 rack_clear_rate_sample(rack);
16854 if ((rack->forced_ack) &&
16855 ((tcp_get_flags(th) & TH_RST) == 0)) {
16856 rack_handle_probe_response(rack, tiwin, us_cts);
16859 * This is the one exception case where we set the rack state
16860 * always. All other times (timers etc) we must have a rack-state
16861 * set (so we assure we have done the checks above for SACK).
16863 rack->r_ctl.rc_rcvtime = cts;
16864 if (rack->r_state != tp->t_state)
16865 rack_set_state(tp, rack);
16866 if (SEQ_GT(th->th_ack, tp->snd_una) &&
16867 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL)
16868 kern_prefetch(rsm, &prev_state);
16869 prev_state = rack->r_state;
16870 if ((thflags & TH_RST) &&
16871 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
16872 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
16873 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) {
16874 /* The connection will be killed by a reset; check the tracepoint */
16875 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV);
16877 retval = (*rack->r_substate) (m, th, so,
16878 tp, &to, drop_hdrlen,
16879 tlen, tiwin, thflags, nxt_pkt, iptos);
16880 if (retval == 0) {
16882 * If retval is 1 the tcb is unlocked and most likely the tp
16883 * is gone.
16885 INP_WLOCK_ASSERT(inp);
16886 if ((rack->rc_gp_dyn_mul) &&
16887 (rack->rc_always_pace) &&
16888 (rack->use_fixed_rate == 0) &&
16889 rack->in_probe_rtt &&
16890 (rack->r_ctl.rc_time_probertt_starts == 0)) {
16892 * If we are going for target, lets recheck before
16893 * we output.
16895 rack_check_probe_rtt(rack, cts);
16897 if (rack->set_pacing_done_a_iw == 0) {
16898 /* How much has been acked? */
16899 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) {
16900 /* We have enough to set in the pacing segment size */
16901 rack->set_pacing_done_a_iw = 1;
16902 rack_set_pace_segments(tp, rack, __LINE__, NULL);
16905 tcp_rack_xmit_timer_commit(rack, tp);
16906 #ifdef TCP_ACCOUNTING
16908 * If we set the ack_val_set to what ack processing we are doing
16909 * we also want to track how many cycles we burned. Note
16910 * the bits after tcp_output we let be "free". This is because
16911 * we are also tracking the tcp_output times as well. Note the
16912 * use of 0xf here since we only have 11 counters (0 - 0xa) and
16913 * 0xf cannot be returned and is what we initialize it to, to
16914 * indicate we are not doing the tabulations.
16916 if (ack_val_set != 0xf) {
16917 uint64_t crtsc;
16919 crtsc = get_cyclecount();
16920 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16921 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val);
16924 #endif
16925 if ((nxt_pkt == 0) && (no_output == 0)) {
16926 if ((rack->r_wanted_output != 0) ||
16927 (tp->t_flags & TF_ACKNOW) ||
16928 (rack->r_fast_output != 0)) {
16930 do_output_now:
16931 if (tcp_output(tp) < 0) {
16932 #ifdef TCP_ACCOUNTING
16933 sched_unpin();
16934 #endif
16935 return (1);
16937 did_out = 1;
16939 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
16940 rack_free_trim(rack);
16941 } else if ((nxt_pkt == 0) && (tp->t_flags & TF_ACKNOW)) {
16942 goto do_output_now;
16943 } else if ((no_output == 1) &&
16944 (nxt_pkt == 0) &&
16945 (tcp_in_hpts(rack->rc_tp) == 0)) {
16947 * We are not in hpts and we had a pacing timer up. Use
16948 * the remaining time (slot_remaining) to restart the timer.
16950 KASSERT ((slot_remaining != 0), ("slot remaining is zero for rack:%p tp:%p", rack, tp));
16951 rack_start_hpts_timer(rack, tp, cts, slot_remaining, 0, 0);
16952 rack_free_trim(rack);
16954 /* Clear the flag; it may have been cleared by output, but we may not have called output */
16955 if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS))
16956 tp->t_flags2 &= ~TF2_HPTS_CALLS;
16958 * The draft (v3) calls for us to use SEQ_GEQ, but that
16959 * causes issues when we are just going app limited. Lets
16960 * instead use SEQ_GT <or> where its equal but more data
16961 * is outstanding.
16963 * Also make sure we are on the last ack of a series. We
16964 * have to have all the ack's processed in queue to know
16965 * if there is something left outstanding.
16967 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) &&
16968 (rack->rc_new_rnd_needed == 0) &&
16969 (nxt_pkt == 0)) {
16971 * We have crossed into a new round with
16972 * the new snd_una.
16974 rack_new_round_setup(tp, rack, tp->snd_una);
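/*
 * A minimal sketch of the round bookkeeping this implies (field and
 * flag names here are hypothetical, not the rack internals): once the
 * cumulative ack reaches the recorded end of the current round, and
 * we are on the last ack of the queued series, a new round starts and
 * its end is pegged at the data sent so far:
 *
 *	if (SEQ_GEQ(snd_una, round_end) && last_ack_in_series) {
 *		round_count++;
 *		round_end = snd_max;
 *	}
 */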
16976 if ((nxt_pkt == 0) &&
16977 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
16978 (SEQ_GT(tp->snd_max, tp->snd_una) ||
16979 (tp->t_flags & TF_DELACK) ||
16980 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
16981 (tp->t_state <= TCPS_CLOSING)))) {
16982 /* We could not send (probably in the hpts but stopped the timer earlier)? */
16983 if ((tp->snd_max == tp->snd_una) &&
16984 ((tp->t_flags & TF_DELACK) == 0) &&
16985 (tcp_in_hpts(rack->rc_tp)) &&
16986 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
16987 /* keep alive not needed if we are hptsi output yet */
16989 } else {
16990 int late = 0;
16991 if (tcp_in_hpts(tp)) {
16992 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
16993 us_cts = tcp_get_usecs(NULL);
16994 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
16995 rack->r_early = 1;
16996 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
16997 } else
16998 late = 1;
16999 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
17001 tcp_hpts_remove(tp);
17003 if (late && (did_out == 0)) {
17005 * We are late in the sending
17006 * and we did not call the output
17007 * (this probably should not happen).
17009 goto do_output_now;
17011 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
17013 way_out = 1;
17014 } else if (nxt_pkt == 0) {
17015 /* Do we have the correct timer running? */
17016 rack_timer_audit(tp, rack, &so->so_snd);
17017 way_out = 2;
17019 done_with_input:
17020 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs));
17021 if (did_out)
17022 rack->r_wanted_output = 0;
17025 #ifdef TCP_ACCOUNTING
17026 sched_unpin();
17027 #endif
17028 return (retval);
17031 static void
17032 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
17033 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos)
17035 struct timeval tv;
17037 /* First lets see if we have old packets */
17038 if (!STAILQ_EMPTY(&tp->t_inqueue)) {
17039 if (ctf_do_queued_segments(tp, 1)) {
17040 m_freem(m);
17041 return;
17044 if (m->m_flags & M_TSTMP_LRO) {
17045 mbuf_tstmp2timeval(m, &tv);
17046 } else {
17047 /* Should not happen; should we KASSERT instead? */
17048 tcp_get_usecs(&tv);
17050 if (rack_do_segment_nounlock(tp, m, th, drop_hdrlen, tlen, iptos, 0,
17051 &tv) == 0) {
17052 INP_WUNLOCK(tptoinpcb(tp));
17056 struct rack_sendmap *
17057 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused)
17059 struct rack_sendmap *rsm = NULL;
17060 int32_t idx;
17061 uint32_t srtt = 0, thresh = 0, ts_low = 0;
17063 /* Return the next guy to be re-transmitted */
17064 if (tqhash_empty(rack->r_ctl.tqh)) {
17065 return (NULL);
17067 if (tp->t_flags & TF_SENTFIN) {
17068 /* retran the end FIN? */
17069 return (NULL);
17071 /* ok lets look at this one */
17072 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
17073 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) {
17074 return (rsm);
17076 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
17077 goto check_it;
17079 rsm = rack_find_lowest_rsm(rack);
17080 if (rsm == NULL) {
17081 return (NULL);
17083 check_it:
17084 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) &&
17085 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
17087 * No sack so we automatically do the 3 strikes and
17088 * retransmit (no rack timer would be started).
17090 return (rsm);
17092 if (rsm->r_flags & RACK_ACKED) {
17093 return (NULL);
17095 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) &&
17096 (rsm->r_dupack < DUP_ACK_THRESHOLD)) {
17097 /* Its not yet ready */
17098 return (NULL);
17100 srtt = rack_grab_rtt(tp, rack);
17101 idx = rsm->r_rtr_cnt - 1;
17102 ts_low = (uint32_t)rsm->r_tim_lastsent[idx];
17103 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1);
17104 if ((tsused == ts_low) ||
17105 (TSTMP_LT(tsused, ts_low))) {
17106 /* No time since sending */
17107 return (NULL);
17109 if ((tsused - ts_low) < thresh) {
17110 /* It has not been long enough yet */
17111 return (NULL);
17113 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
17114 ((rsm->r_flags & RACK_SACK_PASSED))) {
17116 * We have passed the dup-ack threshold <or>
17117 * a SACK has indicated this is missing.
17118 * Note that if you are a declared attacker
17119 * it is only the dup-ack threshold that
17120 * will cause retransmits.
17122 /* log retransmit reason */
17123 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1);
17124 rack->r_fast_output = 0;
17125 return (rsm);
17127 return (NULL);
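/*
 * For illustration only (made-up numbers): with srtt == 40,000 usec
 * and rack_calc_thresh_rack() returning thresh == 45,000 usec, a
 * segment last (re)sent at ts_low == 1,000,000 usec becomes a
 * retransmit candidate only once tsused >= 1,045,000 usec, and then
 * only if it has either been passed by a SACK (RACK_SACK_PASSED) or
 * has reached DUP_ACK_THRESHOLD. Until then this function returns
 * NULL.
 */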
17130 static void
17131 rack_log_pacing_delay_calc (struct tcp_rack *rack, uint32_t len, uint32_t slot,
17132 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method,
17133 int line, struct rack_sendmap *rsm, uint8_t quality)
17135 if (tcp_bblogging_on(rack->rc_tp)) {
17136 union tcp_log_stackspecific log;
17137 struct timeval tv;
17139 if (rack_verbose_logging == 0) {
17141 * We are not verbose; screen out all but the
17142 * ones we always want.
17144 if ((method != 2) &&
17145 (method != 3) &&
17146 (method != 7) &&
17147 (method != 89) &&
17148 (method != 14) &&
17149 (method != 20)) {
17150 return;
17153 memset(&log, 0, sizeof(log));
17154 log.u_bbr.flex1 = slot;
17155 log.u_bbr.flex2 = len;
17156 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs;
17157 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs;
17158 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss;
17159 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca;
17160 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data;
17161 log.u_bbr.use_lt_bw <<= 1;
17162 log.u_bbr.use_lt_bw |= rack->r_late;
17163 log.u_bbr.use_lt_bw <<= 1;
17164 log.u_bbr.use_lt_bw |= rack->r_early;
17165 log.u_bbr.use_lt_bw <<= 1;
17166 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
17167 log.u_bbr.use_lt_bw <<= 1;
17168 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
17169 log.u_bbr.use_lt_bw <<= 1;
17170 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
17171 log.u_bbr.use_lt_bw <<= 1;
17172 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
17173 log.u_bbr.use_lt_bw <<= 1;
17174 log.u_bbr.use_lt_bw |= rack->gp_ready;
17175 log.u_bbr.pkt_epoch = line;
17176 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed;
17177 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early;
17178 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec;
17179 log.u_bbr.bw_inuse = bw_est;
17180 log.u_bbr.delRate = bw;
17181 if (rack->r_ctl.gp_bw == 0)
17182 log.u_bbr.cur_del_rate = 0;
17183 else
17184 log.u_bbr.cur_del_rate = rack_get_bw(rack);
17185 log.u_bbr.rttProp = len_time;
17186 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt;
17187 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit;
17188 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
17189 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) {
17190 /* We are in slow start */
17191 log.u_bbr.flex7 = 1;
17192 } else {
17193 /* we are on congestion avoidance */
17194 log.u_bbr.flex7 = 0;
17196 log.u_bbr.flex8 = method;
17197 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
17198 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
17199 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec;
17200 log.u_bbr.cwnd_gain <<= 1;
17201 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
17202 log.u_bbr.cwnd_gain <<= 1;
17203 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
17204 log.u_bbr.bbr_substate = quality;
17205 log.u_bbr.bbr_state = rack->dgp_on;
17206 log.u_bbr.bbr_state <<= 1;
17207 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd;
17208 log.u_bbr.bbr_state <<= 2;
17209 TCP_LOG_EVENTP(rack->rc_tp, NULL,
17210 &rack->rc_inp->inp_socket->so_rcv,
17211 &rack->rc_inp->inp_socket->so_snd,
17212 BBR_LOG_HPTSI_CALC, 0,
17213 0, &log, false, &tv);
17217 static uint32_t
17218 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss)
17220 uint32_t new_tso, user_max, pace_one;
17222 user_max = rack->rc_user_set_max_segs * mss;
17223 if (rack->rc_force_max_seg) {
17224 return (user_max);
17226 if (rack->use_fixed_rate &&
17227 ((rack->r_ctl.crte == NULL) ||
17228 (bw != rack->r_ctl.crte->rate))) {
17229 /* Use the user mss since we are not exactly matched */
17230 return (user_max);
17232 if (rack_pace_one_seg ||
17233 (rack->r_ctl.rc_user_set_min_segs == 1))
17234 pace_one = 1;
17235 else
17236 pace_one = 0;
17238 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss,
17239 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor);
17240 if (new_tso > user_max)
17241 new_tso = user_max;
17242 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) {
17243 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso)
17244 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss;
17246 if (rack->r_ctl.rc_user_set_min_segs &&
17247 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso))
17248 new_tso = rack->r_ctl.rc_user_set_min_segs * mss;
17249 return (new_tso);
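/*
 * A minimal sketch of the clamping order above, with hypothetical
 * names and inputs (this is not the kernel helper itself):
 *
 *	uint32_t
 *	clamp_burst(uint32_t tso, uint32_t user_max, uint32_t client_segs,
 *	    uint32_t min_segs, uint32_t mss)
 *	{
 *		if (tso > user_max)		/. never above the user cap ./
 *			tso = user_max;
 *		if (client_segs * mss > tso)	/. hybrid client suggestion ./
 *			tso = client_segs * mss;
 *		if (min_segs * mss > tso)	/. never below the user minimum ./
 *			tso = min_segs * mss;
 *		return (tso);
 *	}
 *
 * (comment markers in the sketch are written as "/." and "./" so they
 * do not terminate this block comment).
 */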
17252 static uint64_t
17253 rack_arrive_at_discounted_rate(struct tcp_rack *rack, uint64_t window_input, uint32_t *rate_set, uint32_t *gain_b)
17255 uint64_t reduced_win;
17256 uint32_t gain;
17258 if (window_input < rc_init_window(rack)) {
17260 * The cwnd is collapsed to
17261 * nearly zero, maybe because of a time-out?
17262 * Lets drop back to the lt-bw.
17264 reduced_win = rack_get_lt_bw(rack);
17265 /* Set the flag so the caller knows its a rate and not a reduced window */
17266 *rate_set = 1;
17267 gain = 100;
17268 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) {
17270 * If we are in recovery our cwnd needs to be less for
17271 * our pacing consideration.
17273 if (rack->rack_hibeta == 0) {
17274 reduced_win = window_input / 2;
17275 gain = 50;
17276 } else {
17277 reduced_win = window_input * rack->r_ctl.saved_hibeta;
17278 reduced_win /= 100;
17279 gain = rack->r_ctl.saved_hibeta;
17281 } else {
17283 * Apply Timely factor to increase/decrease the
17284 * amount we are pacing at.
17286 gain = rack_get_output_gain(rack, NULL);
17287 if (gain > rack_gain_p5_ub) {
17288 gain = rack_gain_p5_ub;
17290 reduced_win = window_input * gain;
17291 reduced_win /= 100;
17293 if (gain_b != NULL)
17294 *gain_b = gain;
17296 * What is being returned here is a trimmed-down
17297 * window value in all cases where rate_set is left
17298 * at 0. In one case we actually return the rate (lt_bw).
17299 * The "reduced_win" is returned as a slimmed-down cwnd that
17300 * is then converted by the caller into a rate when rate_set
17301 * is 0.
17303 return (reduced_win);
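/*
 * For illustration only (made-up numbers): with window_input ==
 * 100,000 bytes, recovery without hibeta returns 50,000 (gain 50);
 * recovery with a saved hibeta of 80 returns 80,000 (gain 80); and
 * outside recovery a Timely gain of, say, 120 (capped at
 * rack_gain_p5_ub) would return 120,000. Only the collapsed-window
 * case flips *rate_set and hands back rack_get_lt_bw() directly as a
 * rate rather than a window.
 */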
17306 static int32_t
17307 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced)
17309 uint64_t lentim, fill_bw;
17311 rack->r_via_fill_cw = 0;
17312 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use)
17313 return (slot);
17314 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd)
17315 return (slot);
17316 if (rack->r_ctl.rc_last_us_rtt == 0)
17317 return (slot);
17318 if (rack->rc_pace_fill_if_rttin_range &&
17319 (rack->r_ctl.rc_last_us_rtt >=
17320 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) {
17321 /* The rtt is huge, N * smallest, lets not fill */
17322 return (slot);
17324 if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap)
17325 return (slot);
17327 * First let's calculate the b/w based on the last us-rtt
17328 * and the smallest send window.
17330 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use);
17331 if (rack->rc_fillcw_apply_discount) {
17332 uint32_t rate_set = 0;
17334 fill_bw = rack_arrive_at_discounted_rate(rack, fill_bw, &rate_set, NULL);
17335 if (rate_set) {
17336 goto at_lt_bw;
17339 /* Take the rwnd if its smaller */
17340 if (fill_bw > rack->rc_tp->snd_wnd)
17341 fill_bw = rack->rc_tp->snd_wnd;
17342 /* Now lets make it into a b/w */
17343 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC;
17344 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt;
17345 /* Adjust to any cap */
17346 if (rack->r_ctl.fillcw_cap && fill_bw >= rack->r_ctl.fillcw_cap)
17347 fill_bw = rack->r_ctl.fillcw_cap;
17349 at_lt_bw:
17350 if (rack_bw_multipler > 0) {
17352 * We want to limit fill-cw to some multiplier
17353 * of the max(lt_bw, gp_est). The normal default
17354 * is 0 for off, so a sysctl has enabled it.
17356 uint64_t lt_bw, gp, rate;
17358 gp = rack_get_gp_est(rack);
17359 lt_bw = rack_get_lt_bw(rack);
17360 if (lt_bw > gp)
17361 rate = lt_bw;
17362 else
17363 rate = gp;
17364 rate *= rack_bw_multipler;
17365 rate /= 100;
17366 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
17367 union tcp_log_stackspecific log;
17368 struct timeval tv;
17370 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
17371 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
17372 log.u_bbr.flex1 = rack_bw_multipler;
17373 log.u_bbr.flex2 = len;
17374 log.u_bbr.cur_del_rate = gp;
17375 log.u_bbr.delRate = lt_bw;
17376 log.u_bbr.bw_inuse = rate;
17377 log.u_bbr.rttProp = fill_bw;
17378 log.u_bbr.flex8 = 44;
17379 tcp_log_event(rack->rc_tp, NULL, NULL, NULL,
17380 BBR_LOG_CWND, 0,
17381 0, &log, false, NULL,
17382 __func__, __LINE__, &tv);
17384 if (fill_bw > rate)
17385 fill_bw = rate;
17387 /* We are below the min b/w */
17388 if (non_paced)
17389 *rate_wanted = fill_bw;
17390 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted))
17391 return (slot);
17392 rack->r_via_fill_cw = 1;
17393 if (rack->r_rack_hw_rate_caps &&
17394 (rack->r_ctl.crte != NULL)) {
17395 uint64_t high_rate;
17397 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
17398 if (fill_bw > high_rate) {
17399 /* We are capping bw at the highest rate table entry */
17400 if (*rate_wanted > high_rate) {
17401 /* The original rate was also capped */
17402 rack->r_via_fill_cw = 0;
17404 rack_log_hdwr_pacing(rack,
17405 fill_bw, high_rate, __LINE__,
17406 0, 3);
17407 fill_bw = high_rate;
17408 if (capped)
17409 *capped = 1;
17411 } else if ((rack->r_ctl.crte == NULL) &&
17412 (rack->rack_hdrw_pacing == 0) &&
17413 (rack->rack_hdw_pace_ena) &&
17414 rack->r_rack_hw_rate_caps &&
17415 (rack->rack_attempt_hdwr_pace == 0) &&
17416 (rack->rc_inp->inp_route.ro_nh != NULL) &&
17417 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
17419 * Ok we may have a first attempt that is greater than our top rate
17420 * lets check.
17422 uint64_t high_rate;
17424 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
17425 if (high_rate) {
17426 if (fill_bw > high_rate) {
17427 fill_bw = high_rate;
17428 if (capped)
17429 *capped = 1;
17433 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) {
17434 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
17435 fill_bw, 0, 0, HYBRID_LOG_RATE_CAP, 2, NULL, __LINE__);
17436 fill_bw = rack->r_ctl.bw_rate_cap;
17439 * Ok fill_bw holds our mythical b/w to fill the cwnd
17440 * in an rtt (unless it was capped); what does that
17441 * equate to time-wise?
17443 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC;
17444 lentim /= fill_bw;
17445 *rate_wanted = fill_bw;
17446 if (non_paced || (lentim < slot)) {
17447 rack_log_pacing_delay_calc(rack, len, slot, fill_bw,
17448 0, lentim, 12, __LINE__, NULL, 0);
17449 return ((int32_t)lentim);
17450 } else
17451 return (slot);
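/*
 * For illustration only (made-up numbers): with cwnd_to_use ==
 * 100,000 bytes, snd_wnd larger than that, and rc_last_us_rtt ==
 * 10,000 usec,
 *
 *	fill_bw = 100,000 * HPTS_USEC_IN_SEC / 10,000 = 10,000,000 B/s
 *
 * and a 14,480 byte send would be spread over
 *
 *	lentim = 14,480 * HPTS_USEC_IN_SEC / 10,000,000 = 1,448 usec.
 *
 * For a paced caller that result replaces the incoming slot only when
 * it is smaller, so fill-cw can shorten the pacing delay but never
 * lengthen it.
 */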
17454 static int32_t
17455 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz, int line)
17457 uint64_t srtt;
17458 int32_t slot = 0;
17459 int can_start_hw_pacing = 1;
17460 int err;
17461 int pace_one;
17463 if (rack_pace_one_seg ||
17464 (rack->r_ctl.rc_user_set_min_segs == 1))
17465 pace_one = 1;
17466 else
17467 pace_one = 0;
17468 if (rack->rc_always_pace == 0) {
17470 * We use the most optimistic possible cwnd/srtt for
17471 * sending calculations. This will make our
17472 * calculation anticipate getting more through
17473 * quicker than possible. But that's ok, we don't want
17474 * the peer to have a gap in data sending.
17476 uint64_t cwnd, tr_perms = 0;
17477 int32_t reduce;
17479 old_method:
17481 * We keep no precise pacing with the old method
17482 * instead we use the pacer to mitigate bursts.
17484 if (rack->r_ctl.rc_rack_min_rtt)
17485 srtt = rack->r_ctl.rc_rack_min_rtt;
17486 else
17487 srtt = max(tp->t_srtt, 1);
17488 if (rack->r_ctl.rc_rack_largest_cwnd)
17489 cwnd = rack->r_ctl.rc_rack_largest_cwnd;
17490 else
17491 cwnd = rack->r_ctl.cwnd_to_use;
17492 /* Inflate cwnd by 1000 so srtt of usecs is in ms */
17493 tr_perms = (cwnd * 1000) / srtt;
17494 if (tr_perms == 0) {
17495 tr_perms = ctf_fixed_maxseg(tp);
17498 * Calculate how long this will take to drain. If
17499 * the calculation comes out to zero, that's ok; we
17500 * will use send_a_lot to possibly spin around for
17501 * more, increasing tot_len_this_send to the point
17502 * that it's going to require a pace, or we hit the
17503 * cwnd. In that case we are just waiting for
17504 * an ACK.
17506 slot = len / tr_perms;
17507 /* Now do we reduce the time so we don't run dry? */
17508 if (slot && rack_slot_reduction) {
17509 reduce = (slot / rack_slot_reduction);
17510 if (reduce < slot) {
17511 slot -= reduce;
17512 } else
17513 slot = 0;
17514 } else
17515 reduce = 0;
17516 slot *= HPTS_USEC_IN_MSEC;
17517 if (rack->rc_pace_to_cwnd) {
17518 uint64_t rate_wanted = 0;
17520 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1);
17521 rack->rc_ack_can_sendout_data = 1;
17522 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0);
17523 } else
17524 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0);
17525 /*******************************************************/
17526 /* RRS: We insert non-paced call to stats here for len */
17527 /*******************************************************/
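/*
 * For illustration only (made-up numbers): with cwnd == 50,000 bytes
 * and srtt == 20,000 usec, tr_perms = (50,000 * 1000) / 20,000 =
 * 2,500 bytes per msec. A 15,000 byte send then gets slot = 6 msec;
 * with a rack_slot_reduction of 4 that is trimmed by 1 msec to 5,
 * and the final conversion to HPTS units yields 5,000 usec of pacing
 * delay.
 */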
17528 } else {
17529 uint64_t bw_est, res, lentim, rate_wanted;
17530 uint32_t segs, oh;
17531 int capped = 0;
17532 int prev_fill;
17534 if ((rack->r_rr_config == 1) && rsm) {
17535 return (rack->r_ctl.rc_min_to);
17537 if (rack->use_fixed_rate) {
17538 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack);
17539 } else if ((rack->r_ctl.init_rate == 0) &&
17540 (rack->r_ctl.gp_bw == 0)) {
17541 /* no way to yet do an estimate */
17542 bw_est = rate_wanted = 0;
17543 } else if (rack->dgp_on) {
17544 bw_est = rack_get_bw(rack);
17545 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped);
17546 } else {
17547 uint32_t gain, rate_set = 0;
17549 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use);
17550 rate_wanted = rack_arrive_at_discounted_rate(rack, rate_wanted, &rate_set, &gain);
17551 if (rate_set == 0) {
17552 if (rate_wanted > rack->rc_tp->snd_wnd)
17553 rate_wanted = rack->rc_tp->snd_wnd;
17554 /* Now lets make it into a b/w */
17555 rate_wanted *= (uint64_t)HPTS_USEC_IN_SEC;
17556 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt;
17558 bw_est = rate_wanted;
17559 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd,
17560 rack->r_ctl.cwnd_to_use,
17561 rate_wanted, bw_est,
17562 rack->r_ctl.rc_last_us_rtt,
17563 88, __LINE__, NULL, gain);
17565 if ((bw_est == 0) || (rate_wanted == 0) ||
17566 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) {
17568 * No way yet to make a b/w estimate or
17569 * our raise is set incorrectly.
17571 goto old_method;
17573 rack_rate_cap_bw(rack, &rate_wanted, &capped);
17574 /* We need to account for all the overheads */
17575 segs = (len + segsiz - 1) / segsiz;
17577 * We need the diff between 1514 bytes (e-mtu with e-hdr)
17578 * and how much data we put in each packet. Yes this
17579 * means we may be off if we are larger than 1500 bytes
17580 * or smaller. But this just makes us more conservative.
17583 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr);
17584 if (rack->r_is_v6) {
17585 #ifdef INET6
17586 oh += sizeof(struct ip6_hdr);
17587 #endif
17588 } else {
17589 #ifdef INET
17590 oh += sizeof(struct ip);
17591 #endif
17593 /* We add a fixed 14 for the ethernet header */
17594 oh += 14;
17595 segs *= oh;
17596 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC;
17597 res = lentim / rate_wanted;
17598 slot = (uint32_t)res;
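/*
 * For illustration only (made-up numbers): with segsiz == 1,448,
 * t_maxseg == 1,460 and IPv4, the per-packet overhead is
 *
 *	oh = (1,460 - 1,448) + 20 (tcp) + 20 (ip) + 14 (ether) = 66
 *
 * so a 14,480 byte send (10 segments) is charged an extra 660 bytes,
 * and at rate_wanted == 12,500,000 B/s (100 Mbit/s)
 *
 *	slot = (14,480 + 660) * HPTS_USEC_IN_SEC / 12,500,000
 *	     = 1,211 usec.
 */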
17599 if (rack_hw_rate_min &&
17600 (rate_wanted < rack_hw_rate_min)) {
17601 can_start_hw_pacing = 0;
17602 if (rack->r_ctl.crte) {
17604 * Ok we need to release it, we
17605 * have fallen too low.
17607 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
17608 rack->r_ctl.crte = NULL;
17609 rack->rack_attempt_hdwr_pace = 0;
17610 rack->rack_hdrw_pacing = 0;
17613 if (rack->r_ctl.crte &&
17614 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) {
17616 * We want more than the hardware can give us,
17617 * don't start any hw pacing.
17619 can_start_hw_pacing = 0;
17620 if (rack->r_rack_hw_rate_caps == 0) {
17622 * Ok we need to release it, we
17623 * want more than the card can give us and
17624 * no rate cap is in place. Set it up so
17625 * when we want less we can retry.
17627 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
17628 rack->r_ctl.crte = NULL;
17629 rack->rack_attempt_hdwr_pace = 0;
17630 rack->rack_hdrw_pacing = 0;
17633 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) {
17635 * We lost our rate somehow, this can happen
17636 * if the interface changed underneath us.
17638 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
17639 rack->r_ctl.crte = NULL;
17640 /* Lets re-allow attempting to setup pacing */
17641 rack->rack_hdrw_pacing = 0;
17642 rack->rack_attempt_hdwr_pace = 0;
17643 rack_log_hdwr_pacing(rack,
17644 rate_wanted, bw_est, __LINE__,
17645 0, 6);
17647 prev_fill = rack->r_via_fill_cw;
17648 if ((rack->rc_pace_to_cwnd) &&
17649 (capped == 0) &&
17650 (rack->dgp_on == 1) &&
17651 (rack->use_fixed_rate == 0) &&
17652 (rack->in_probe_rtt == 0) &&
17653 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) {
17655 * We want to pace at our rate *or* faster to
17656 * fill the cwnd to the max if its not full.
17658 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0);
17659 /* Re-check to make sure we are not exceeding our max b/w */
17660 if ((rack->r_ctl.crte != NULL) &&
17661 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) {
17663 * We want more than the hardware can give us,
17664 * don't start any hw pacing.
17666 can_start_hw_pacing = 0;
17667 if (rack->r_rack_hw_rate_caps == 0) {
17669 * Ok we need to release it, we
17670 * want more than the card can give us and
17671 * no rate cap is in place. Set it up so
17672 * when we want less we can retry.
17674 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
17675 rack->r_ctl.crte = NULL;
17676 rack->rack_attempt_hdwr_pace = 0;
17677 rack->rack_hdrw_pacing = 0;
17678 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
17682 if ((rack->rc_inp->inp_route.ro_nh != NULL) &&
17683 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
17684 if ((rack->rack_hdw_pace_ena) &&
17685 (can_start_hw_pacing > 0) &&
17686 (rack->rack_hdrw_pacing == 0) &&
17687 (rack->rack_attempt_hdwr_pace == 0)) {
17689 * Lets attempt to turn on hardware pacing
17690 * if we can.
17692 rack->rack_attempt_hdwr_pace = 1;
17693 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp,
17694 rack->rc_inp->inp_route.ro_nh->nh_ifp,
17695 rate_wanted,
17696 RS_PACING_GEQ,
17697 &err, &rack->r_ctl.crte_prev_rate);
17698 if (rack->r_ctl.crte) {
17699 rack->rack_hdrw_pacing = 1;
17700 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz,
17701 pace_one, rack->r_ctl.crte,
17702 NULL, rack->r_ctl.pace_len_divisor);
17703 rack_log_hdwr_pacing(rack,
17704 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
17705 err, 0);
17706 rack->r_ctl.last_hw_bw_req = rate_wanted;
17707 } else {
17708 counter_u64_add(rack_hw_pace_init_fail, 1);
17710 } else if (rack->rack_hdrw_pacing &&
17711 (rack->r_ctl.last_hw_bw_req != rate_wanted)) {
17712 /* Do we need to adjust our rate? */
17713 const struct tcp_hwrate_limit_table *nrte;
17715 if (rack->r_up_only &&
17716 (rate_wanted < rack->r_ctl.crte->rate)) {
17718 * We have four possible states here
17719 * having to do with the previous time
17720 * and this time.
17721 * previous | this-time
17722 * A) 0 | 0 -- fill_cw not in the picture
17723 * B) 1 | 0 -- we were doing a fill-cw but now are not
17724 * C) 1 | 1 -- all rates from fill_cw
17725 * D) 0 | 1 -- we were doing non-fill and now we are filling
17727 * For cases A, C and D we don't allow a drop. But for
17728 * case B, where we are now on our steady rate, we do
17729 * allow a drop.
17732 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0)))
17733 goto done_w_hdwr;
17735 if ((rate_wanted > rack->r_ctl.crte->rate) ||
17736 (rate_wanted <= rack->r_ctl.crte_prev_rate)) {
17737 if (rack_hw_rate_to_low &&
17738 (bw_est < rack_hw_rate_to_low)) {
17740 * The pacing rate is too low for hardware, but
17741 * do allow hardware pacing to be restarted.
17743 rack_log_hdwr_pacing(rack,
17744 bw_est, rack->r_ctl.crte->rate, __LINE__,
17745 0, 5);
17746 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
17747 rack->r_ctl.crte = NULL;
17748 rack->rack_attempt_hdwr_pace = 0;
17749 rack->rack_hdrw_pacing = 0;
17750 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
17751 goto done_w_hdwr;
17753 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte,
17754 rack->rc_tp,
17755 rack->rc_inp->inp_route.ro_nh->nh_ifp,
17756 rate_wanted,
17757 RS_PACING_GEQ,
17758 &err, &rack->r_ctl.crte_prev_rate);
17759 if (nrte == NULL) {
17761 * Lost the rate, lets drop hardware pacing
17762 * period.
17764 rack->rack_hdrw_pacing = 0;
17765 rack->r_ctl.crte = NULL;
17766 rack_log_hdwr_pacing(rack,
17767 rate_wanted, 0, __LINE__,
17768 err, 1);
17769 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
17770 counter_u64_add(rack_hw_pace_lost, 1);
17771 } else if (nrte != rack->r_ctl.crte) {
17772 rack->r_ctl.crte = nrte;
17773 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted,
17774 segsiz, pace_one, rack->r_ctl.crte,
17775 NULL, rack->r_ctl.pace_len_divisor);
17776 rack_log_hdwr_pacing(rack,
17777 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
17778 err, 2);
17779 rack->r_ctl.last_hw_bw_req = rate_wanted;
17781 } else {
17782 /* We just need to adjust the segment size */
17783 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
17784 rack_log_hdwr_pacing(rack,
17785 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
17786 0, 4);
17787 rack->r_ctl.last_hw_bw_req = rate_wanted;
17791 done_w_hdwr:
17792 if (rack_limit_time_with_srtt &&
17793 (rack->use_fixed_rate == 0) &&
17794 (rack->rack_hdrw_pacing == 0)) {
17796 * Sanity check, we do not allow the pacing delay
17797 * to be longer than the SRTT of the path. If it is
17798 * a slow path, then adding a packet should increase
17799 * the RTT and compensate for this i.e. the srtt will
17800 * be greater so the allowed pacing time will be greater.
17802 * Note this restriction does not apply where a peak rate
17803 * is set, i.e. we are doing fixed pacing or hardware pacing.
17805 if (rack->rc_tp->t_srtt)
17806 srtt = rack->rc_tp->t_srtt;
17807 else
17808 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */
17809 if (srtt < (uint64_t)slot) {
17810 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0);
17811 slot = srtt;
17814 /*******************************************************************/
17815 /* RRS: We insert paced call to stats here for len and rate_wanted */
17816 /*******************************************************************/
17817 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0);
17819 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) {
17821 * If this rate is seeing enobufs when it
17822 * goes to send then either the nic is out
17823 * of gas or we are mis-estimating the time
17824 * somehow and not letting the queue empty
17825 * completely. Let's add to the pacing time.
17827 int hw_boost_delay;
17829 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult;
17830 if (hw_boost_delay > rack_enobuf_hw_max)
17831 hw_boost_delay = rack_enobuf_hw_max;
17832 else if (hw_boost_delay < rack_enobuf_hw_min)
17833 hw_boost_delay = rack_enobuf_hw_min;
17834 slot += hw_boost_delay;
17836 return (slot);
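/*
 * For illustration only (made-up numbers): if the hardware rate entry
 * reports time_between == 100 usec per packet and
 * rack_enobuf_hw_boost_mult == 2, the pacing slot is padded by 200
 * usec, clamped into [rack_enobuf_hw_min, rack_enobuf_hw_max], to
 * give the NIC queue time to drain before the next burst.
 */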
17839 static void
17840 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack,
17841 tcp_seq startseq, uint32_t sb_offset)
17843 struct rack_sendmap *my_rsm = NULL;
17845 if (tp->t_state < TCPS_ESTABLISHED) {
17847 * We don't start any measurements if we are
17848 * not at least established.
17850 return;
17852 if (tp->t_state >= TCPS_FIN_WAIT_1) {
17854 * We will get no more data into the SB;
17855 * this means we need to have the data available
17856 * before we start a measurement.
17859 if (sbavail(&tptosocket(tp)->so_snd) <
17860 max(rc_init_window(rack),
17861 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) {
17862 /* Nope not enough data */
17863 return;
17866 tp->t_flags |= TF_GPUTINPROG;
17867 rack->r_ctl.rc_gp_cumack_ts = 0;
17868 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
17869 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
17870 tp->gput_seq = startseq;
17871 rack->app_limited_needs_set = 0;
17872 if (rack->in_probe_rtt)
17873 rack->measure_saw_probe_rtt = 1;
17874 else if ((rack->measure_saw_probe_rtt) &&
17875 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
17876 rack->measure_saw_probe_rtt = 0;
17877 if (rack->rc_gp_filled)
17878 tp->gput_ts = rack->r_ctl.last_cumack_advance;
17879 else {
17880 /* Special case initial measurement */
17881 struct timeval tv;
17883 tp->gput_ts = tcp_get_usecs(&tv);
17884 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
17887 * We take a guess out into the future:
17888 * if we have no measurement and no
17889 * initial rate, we measure the first
17890 * initial window's worth of data to
17891 * speed up getting some GP measurement and
17892 * thus start pacing.
17894 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) {
17895 rack->app_limited_needs_set = 1;
17896 tp->gput_ack = startseq + max(rc_init_window(rack),
17897 (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
17898 rack_log_pacing_delay_calc(rack,
17899 tp->gput_seq,
17900 tp->gput_ack,
17902 tp->gput_ts,
17903 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
17905 __LINE__, NULL, 0);
17906 rack_tend_gp_marks(tp, rack);
17907 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
17908 return;
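/*
 * For illustration only (made-up numbers): with an MSS of 1,460 and a
 * 10-segment initial window (14,600 bytes), a first-ever measurement
 * spans
 *
 *	gput_ack = startseq + max(14,600, MIN_GP_WIN * 1,460)
 *
 * i.e. roughly one initial window of data, just enough to produce a
 * first goodput sample and let pacing start.
 */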
17910 if (sb_offset) {
17912 * We are out somewhere in the sb;
17913 * can we use the already outstanding data?
17916 if (rack->r_ctl.rc_app_limited_cnt == 0) {
17918 * Yes first one is good and in this case
17919 * the tp->gput_ts is correctly set based on
17920 * the last ack that arrived (no need to
17921 * set things up when an ack comes in).
17923 my_rsm = tqhash_min(rack->r_ctl.tqh);
17924 if ((my_rsm == NULL) ||
17925 (my_rsm->r_rtr_cnt != 1)) {
17926 /* retransmission? */
17927 goto use_latest;
17929 } else {
17930 if (rack->r_ctl.rc_first_appl == NULL) {
17932 * If rc_first_appl is NULL
17933 * then the cnt should be 0.
17934 * This is probably an error, maybe
17935 * a KASSERT would be appropriate.
17937 goto use_latest;
17940 * If we have a marker pointer to the last one that is
17941 * app limited we can use that, but we need to set
17942 * things up so that when it gets ack'ed we record
17943 * the ack time (if its not already acked).
17945 rack->app_limited_needs_set = 1;
17947 * We want to get to the rsm that is either
17948 * next with space i.e. over 1 MSS or the one
17949 * after that (after the app-limited).
17951 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl);
17952 if (my_rsm) {
17953 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp))
17954 /* Have to use the next one */
17955 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm);
17956 else {
17957 /* Use after the first MSS of it is acked */
17958 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp);
17959 goto start_set;
17962 if ((my_rsm == NULL) ||
17963 (my_rsm->r_rtr_cnt != 1)) {
17965 * Either its a retransmit or
17966 * the last is the app-limited one.
17968 goto use_latest;
17971 tp->gput_seq = my_rsm->r_start;
17972 start_set:
17973 if (my_rsm->r_flags & RACK_ACKED) {
17975 * This one has been acked; use the arrival ack time
17977 struct rack_sendmap *nrsm;
17979 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
17980 rack->app_limited_needs_set = 0;
17982 * Ok in this path we need to use the r_end now
17983 * since this guy is the starting ack.
17985 tp->gput_seq = my_rsm->r_end;
17987 * We also need to adjust up the sendtime
17988 * to the send of the next data after my_rsm.
17990 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm);
17991 if (nrsm != NULL)
17992 my_rsm = nrsm;
17993 else {
17995 * The next has not been sent, that's the
17996 * case for using the latest.
17998 goto use_latest;
18001 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0];
18002 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
18003 rack->r_ctl.rc_gp_cumack_ts = 0;
18004 if ((rack->r_ctl.cleared_app_ack == 1) &&
18005 (SEQ_GEQ(rack->r_ctl.cleared_app_ack, tp->gput_seq))) {
18007 * We just cleared an application limited period
18008 * so the next seq out needs to skip the first
18009 * ack.
18011 rack->app_limited_needs_set = 1;
18012 rack->r_ctl.cleared_app_ack = 0;
18014 rack_log_pacing_delay_calc(rack,
18015 tp->gput_seq,
18016 tp->gput_ack,
18017 (uintptr_t)my_rsm,
18018 tp->gput_ts,
18019 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
18021 __LINE__, my_rsm, 0);
18022 /* Now lets make sure all are marked as they should be */
18023 rack_tend_gp_marks(tp, rack);
18024 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
18025 return;
18028 use_latest:
18030 * We don't know how long we may have been
18031 * idle or if this is the first-send. Lets
18032 * setup the flag so we will trim off
18033 * the first ack'd data so we get a true
18034 * measurement.
18036 rack->app_limited_needs_set = 1;
18037 tp->gput_ack = startseq + rack_get_measure_window(tp, rack);
18038 rack->r_ctl.rc_gp_cumack_ts = 0;
18039 /* Find this guy so we can pull the send time */
18040 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq);
18041 if (my_rsm) {
18042 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0];
18043 if (my_rsm->r_flags & RACK_ACKED) {
18045 * Unlikely since its probably what was
18046 * just transmitted (but I am paranoid).
18048 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
18049 rack->app_limited_needs_set = 0;
18051 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) {
18052 /* This also is unlikely */
18053 tp->gput_seq = my_rsm->r_start;
18055 } else {
18057 * TSNH unless we have some send-map limit,
18058 * and even at that it should not be hitting
18059 * that limit (we should have stopped sending).
18061 struct timeval tv;
18063 microuptime(&tv);
18064 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
18066 rack_tend_gp_marks(tp, rack);
18067 rack_log_pacing_delay_calc(rack,
18068 tp->gput_seq,
18069 tp->gput_ack,
18070 (uintptr_t)my_rsm,
18071 tp->gput_ts,
18072 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
18073 9, __LINE__, NULL, 0);
18074 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
18077 static inline uint32_t
18078 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use,
18079 uint32_t avail, int32_t sb_offset)
18081 uint32_t len;
18082 uint32_t sendwin;
18084 if (tp->snd_wnd > cwnd_to_use)
18085 sendwin = cwnd_to_use;
18086 else
18087 sendwin = tp->snd_wnd;
18088 if (ctf_outstanding(tp) >= tp->snd_wnd) {
18089 /* We never want to go over our peers rcv-window */
18090 len = 0;
18091 } else {
18092 uint32_t flight;
18094 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
18095 if (flight >= sendwin) {
18097 * We have in flight what we are allowed by cwnd (if
18098 * it was rwnd blocking it would have hit the above
18099 * >= tp->snd_wnd check).
18101 return (0);
18103 len = sendwin - flight;
18104 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) {
18105 /* We would send too much (beyond the rwnd) */
18106 len = tp->snd_wnd - ctf_outstanding(tp);
18108 if ((len + sb_offset) > avail) {
18110 * We don't have that much in the SB, how much is
18111 * there?
18113 len = avail - sb_offset;
18116 return (len);
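/*
 * A minimal sketch of the clamping above (pseudo-expressions, not the
 * kernel helper itself): the amount we may send is bounded by the
 * unused part of min(cwnd, rwnd), by the peer's receive window
 * overall, and by what actually sits in the socket buffer:
 *
 *	len = sendwin - flight;                 (0 if flight >= sendwin)
 *	len = min(len, snd_wnd - outstanding);  (never exceed the rwnd)
 *	len = min(len, avail - sb_offset);      (never exceed the sb)
 */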
18119 static void
18120 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags,
18121 unsigned ipoptlen, int32_t orig_len, int32_t len, int error,
18122 int rsm_is_null, int optlen, int line, uint16_t mode)
18124 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
18125 union tcp_log_stackspecific log;
18126 struct timeval tv;
18128 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
18129 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
18130 log.u_bbr.flex1 = error;
18131 log.u_bbr.flex2 = flags;
18132 log.u_bbr.flex3 = rsm_is_null;
18133 log.u_bbr.flex4 = ipoptlen;
18134 log.u_bbr.flex5 = tp->rcv_numsacks;
18135 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
18136 log.u_bbr.flex7 = optlen;
18137 log.u_bbr.flex8 = rack->r_fsb_inited;
18138 log.u_bbr.applimited = rack->r_fast_output;
18139 log.u_bbr.bw_inuse = rack_get_bw(rack);
18140 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
18141 log.u_bbr.cwnd_gain = mode;
18142 log.u_bbr.pkts_out = orig_len;
18143 log.u_bbr.lt_epoch = len;
18144 log.u_bbr.delivered = line;
18145 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
18146 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18147 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0,
18148 len, &log, false, NULL, __func__, __LINE__, &tv);
18153 static struct mbuf *
18154 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen,
18155 struct rack_fast_send_blk *fsb,
18156 int32_t seglimit, int32_t segsize, int hw_tls)
18158 #ifdef KERN_TLS
18159 struct ktls_session *tls, *ntls;
18160 #ifdef INVARIANTS
18161 struct mbuf *start;
18162 #endif
18163 #endif
18164 struct mbuf *m, *n, **np, *smb;
18165 struct mbuf *top;
18166 int32_t off, soff;
18167 int32_t len = *plen;
18168 int32_t fragsize;
18169 int32_t len_cp = 0;
18170 uint32_t mlen, frags;
18172 soff = off = the_off;
18173 smb = m = the_m;
18174 np = &top;
18175 top = NULL;
18176 #ifdef KERN_TLS
18177 if (hw_tls && (m->m_flags & M_EXTPG))
18178 tls = m->m_epg_tls;
18179 else
18180 tls = NULL;
18181 #ifdef INVARIANTS
18182 start = m;
18183 #endif
18184 #endif
18185 while (len > 0) {
18186 if (m == NULL) {
18187 *plen = len_cp;
18188 break;
18190 #ifdef KERN_TLS
18191 if (hw_tls) {
18192 if (m->m_flags & M_EXTPG)
18193 ntls = m->m_epg_tls;
18194 else
18195 ntls = NULL;
18198 * Avoid mixing TLS records with handshake
18199 * data or TLS records from different
18200 * sessions.
18202 if (tls != ntls) {
18203 MPASS(m != start);
18204 *plen = len_cp;
18205 break;
18208 #endif
18209 mlen = min(len, m->m_len - off);
18210 if (seglimit) {
18212 * For M_EXTPG mbufs, add 3 segments
18213 * + 1 in case we are crossing page boundaries
18214 * + 2 in case the TLS hdr/trailer are used
18215 * It is cheaper to just add the segments
18216 * than it is to take the cache miss to look
18217 * at the mbuf ext_pgs state in detail.
18219 if (m->m_flags & M_EXTPG) {
18220 fragsize = min(segsize, PAGE_SIZE);
18221 frags = 3;
18222 } else {
18223 fragsize = segsize;
18224 frags = 0;
18227 /* Break if we really can't fit anymore. */
18228 if ((frags + 1) >= seglimit) {
18229 *plen = len_cp;
18230 break;
18234 * Reduce size if you can't copy the whole
18235 * mbuf. If we can't copy the whole mbuf, also
18236 * adjust len so the loop will end after this
18237 * mbuf.
18239 if ((frags + howmany(mlen, fragsize)) >= seglimit) {
18240 mlen = (seglimit - frags - 1) * fragsize;
18241 len = mlen;
18242 *plen = len_cp + len;
18244 frags += howmany(mlen, fragsize);
18245 if (frags == 0)
18246 frags++;
18247 seglimit -= frags;
18248 KASSERT(seglimit > 0,
18249 ("%s: seglimit went too low", __func__));
18251 n = m_get(M_NOWAIT, m->m_type);
18252 *np = n;
18253 if (n == NULL)
18254 goto nospace;
18255 n->m_len = mlen;
18256 soff += mlen;
18257 len_cp += n->m_len;
18258 if (m->m_flags & (M_EXT | M_EXTPG)) {
18259 n->m_data = m->m_data + off;
18260 mb_dupcl(n, m);
18261 } else {
18262 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
18263 (u_int)n->m_len);
18265 len -= n->m_len;
18266 off = 0;
18267 m = m->m_next;
18268 np = &n->m_next;
18269 if (len || (soff == smb->m_len)) {
18271 * We have more so we move forward or
18272 * we have consumed the entire mbuf and
18273 * len has fallen to 0.
18275 soff = 0;
18276 smb = m;
18280 if (fsb != NULL) {
18281 fsb->m = smb;
18282 fsb->off = soff;
18283 if (smb) {
18285 * Save off the size of the mbuf. We do
18286 * this so that we can recognize when it
18287 * has been trimmed by sbcut() as acks
18288 * come in.
18290 fsb->o_m_len = smb->m_len;
18291 fsb->o_t_len = M_TRAILINGROOM(smb);
18292 } else {
18294 * This is the case where the next mbuf went to NULL. This
18295 * means with this copy we have sent everything in the sb.
18296 * In theory we could clear the fast_output flag, but let's
18297 * not since it's possible that we could get more added
18298 * and acks that call the extend function which would let
18299 * us send more.
18301 fsb->o_m_len = 0;
18302 fsb->o_t_len = 0;
18305 return (top);
18306 nospace:
18307 if (top)
18308 m_freem(top);
18309 return (NULL);
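/*
 * For illustration only (made-up numbers) of the seglimit accounting
 * in the copy loop above: an M_EXTPG mbuf contributing mlen == 8,192
 * bytes with segsize >= PAGE_SIZE uses fragsize == PAGE_SIZE (4,096
 * on most platforms), so it is charged howmany(8,192, 4,096) == 2
 * fragments plus the fixed 3 (page-crossing and TLS hdr/trailer
 * slack), i.e. 5 of the available descriptors, while a plain 1,448
 * byte copy is charged just 1.
 */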
18314 * This is a copy of m_copym(), taking the TSO segment size/limit
18315 * constraints into account, and advancing the sndptr as it goes.
18317 static struct mbuf *
18318 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen,
18319 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff)
18321 struct mbuf *m, *n;
18322 int32_t soff;
18324 m = rack->r_ctl.fsb.m;
18325 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) {
18327 * The trailing space changed, mbufs can grow
18328 * at the tail but they can't shrink from
18329 * it, KASSERT that. Adjust the orig_m_len to
18330 * compensate for this change.
18332 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)),
18333 ("mbuf:%p rack:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n",
18335 rack,
18336 (intmax_t)M_TRAILINGROOM(m),
18337 rack->r_ctl.fsb.o_t_len,
18338 rack->r_ctl.fsb.o_m_len,
18339 m->m_len));
18340 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m));
18341 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m);
18343 if (m->m_len < rack->r_ctl.fsb.o_m_len) {
18345 * Mbuf shrank, trimmed off the top by an ack, our
18346 * offset changes.
18348 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)),
18349 ("mbuf:%p len:%u rack:%p oml:%u soff:%u\n",
18350 m, m->m_len,
18351 rack, rack->r_ctl.fsb.o_m_len,
18352 rack->r_ctl.fsb.off));
18354 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len))
18355 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len);
18356 else
18357 rack->r_ctl.fsb.off = 0;
18358 rack->r_ctl.fsb.o_m_len = m->m_len;
18359 #ifdef INVARIANTS
18360 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) {
18361 panic("rack:%p m:%p m_len grew outside of t_space compensation",
18362 rack, m);
18363 #endif
18365 soff = rack->r_ctl.fsb.off;
18366 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff));
18367 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen));
18368 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?",
18369 __FUNCTION__,
18370 rack, *plen, m, m->m_len));
18371 /* Save off the right location before we copy and advance */
18372 *s_soff = soff;
18373 *s_mb = rack->r_ctl.fsb.m;
18374 n = rack_fo_base_copym(m, soff, plen,
18375 &rack->r_ctl.fsb,
18376 seglimit, segsize, rack->r_ctl.fsb.hw_tls);
18377 return (n);
18380 /* Log the buffer level */
18381 static void
18382 rack_log_queue_level(struct tcpcb *tp, struct tcp_rack *rack,
18383 int len, struct timeval *tv,
18384 uint32_t cts)
18386 uint32_t p_rate = 0, p_queue = 0, err = 0;
18387 union tcp_log_stackspecific log;
18389 #ifdef RATELIMIT
18390 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue);
18391 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate);
18392 #endif
18393 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
18394 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
18395 log.u_bbr.flex1 = p_rate;
18396 log.u_bbr.flex2 = p_queue;
18397 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using;
18398 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs;
18399 log.u_bbr.flex6 = rack->r_ctl.crte->time_between;
18400 log.u_bbr.flex7 = 99;
18401 log.u_bbr.flex8 = 0;
18402 log.u_bbr.pkts_out = err;
18403 log.u_bbr.delRate = rack->r_ctl.crte->rate;
18404 log.u_bbr.timeStamp = cts;
18405 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18406 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
18407 len, &log, false, NULL, __func__, __LINE__, tv);
18411 static uint32_t
18412 rack_check_queue_level(struct tcp_rack *rack, struct tcpcb *tp,
18413 struct timeval *tv, uint32_t cts, int len, uint32_t segsiz)
18415 uint64_t lentime = 0;
18416 #ifdef RATELIMIT
18417 uint32_t p_rate = 0, p_queue = 0, err;
18418 union tcp_log_stackspecific log;
18419 uint64_t bw;
18421 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue);
18422 /* Failed or queue is zero */
18423 if (err || (p_queue == 0)) {
18424 lentime = 0;
18425 goto out;
18427 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate);
18428 if (err) {
18429 lentime = 0;
18430 goto out;
18433 * If we reach here we have some bytes in
18434 * the queue. The number returned is a value
18435 * between 0 and 0xffff where ffff is full
18436 * and 0 is empty. So how best to make this into
18437 * something usable?
18439 * The "safer" way is to take the b/w gotten
18440 * from the query (which should be our b/w rate)
18441 * and pretend that a full send (our rc_pace_max_segs)
18442 * is outstanding. We factor it so it is as if a full
18443 * number of our MSS segments, in terms of full
18444 * ethernet segments, were outstanding.
18446 bw = p_rate / 8;
18447 if (bw) {
18448 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz);
18449 lentime *= ETHERNET_SEGMENT_SIZE;
18450 lentime *= (uint64_t)HPTS_USEC_IN_SEC;
18451 lentime /= bw;
18452 } else {
18453 /* TSNH -- KASSERT? */
18454 lentime = 0;
18456 out:
18457 if (tcp_bblogging_on(tp)) {
18458 memset(&log, 0, sizeof(log));
18459 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
18460 log.u_bbr.flex1 = p_rate;
18461 log.u_bbr.flex2 = p_queue;
18462 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using;
18463 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs;
18464 log.u_bbr.flex6 = rack->r_ctl.crte->time_between;
18465 log.u_bbr.flex7 = 99;
18466 log.u_bbr.flex8 = 0;
18467 log.u_bbr.pkts_out = err;
18468 log.u_bbr.delRate = rack->r_ctl.crte->rate;
18469 log.u_bbr.cur_del_rate = lentime;
18470 log.u_bbr.timeStamp = cts;
18471 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18472 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
18473 len, &log, false, NULL, __func__, __LINE__,tv);
18475 #endif
18476 return ((uint32_t)lentime);
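/*
 * For illustration only (made-up numbers): with rc_pace_max_segs ==
 * 14,480, segsiz == 1,448 and a queried p_rate of 100 Mbit/s (so
 * bw == 12,500,000 bytes/sec), the pretend full send is 10
 * ethernet-sized frames, giving
 *
 *	lentime = 10 * ETHERNET_SEGMENT_SIZE * HPTS_USEC_IN_SEC / bw
 *	        ~ 1,211 usec (assuming a 1,514 byte wire segment)
 *
 * of queue-drain time before the next transmit.
 */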
18479 static int
18480 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
18481 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp)
18484 * Enter the fast retransmit path. We are given that a sched_pin is
18485 * in place (if accounting is compiled in) and the cycle count taken
18486 * at the entry is in ts_val. The concept here is that the rsm
18487 * now holds the mbuf offsets and such so we can directly transmit
18488 * without a lot of overhead; the len field is already set for
18489 * us to prohibit us from sending too much (usually it's 1 MSS).
18491 struct ip *ip = NULL;
18492 struct udphdr *udp = NULL;
18493 struct tcphdr *th = NULL;
18494 struct mbuf *m = NULL;
18495 struct inpcb *inp;
18496 uint8_t *cpto;
18497 struct tcp_log_buffer *lgb;
18498 #ifdef TCP_ACCOUNTING
18499 uint64_t crtsc;
18500 int cnt_thru = 1;
18501 #endif
18502 struct tcpopt to;
18503 u_char opt[TCP_MAXOLEN];
18504 uint32_t hdrlen, optlen;
18505 int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0;
18506 uint16_t flags;
18507 uint32_t if_hw_tsomaxsegcount = 0, startseq;
18508 uint32_t if_hw_tsomaxsegsize;
18509 int32_t ip_sendflag = IP_NO_SND_TAG_RL;
18511 #ifdef INET6
18512 struct ip6_hdr *ip6 = NULL;
18514 if (rack->r_is_v6) {
18515 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
18516 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
18517 } else
18518 #endif /* INET6 */
18520 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
18521 hdrlen = sizeof(struct tcpiphdr);
18523 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
18524 goto failed;
18526 if (doing_tlp) {
18527 /* It's a TLP, add the flag; it may already be there but be sure */
18528 rsm->r_flags |= RACK_TLP;
18529 } else {
18530 /* If it was a TLP, it is not one on this retransmit */
18531 rsm->r_flags &= ~RACK_TLP;
18533 startseq = rsm->r_start;
18534 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
18535 inp = rack->rc_inp;
18536 to.to_flags = 0;
18537 flags = tcp_outflags[tp->t_state];
18538 if (flags & (TH_SYN|TH_RST)) {
18539 goto failed;
18541 if (rsm->r_flags & RACK_HAS_FIN) {
18542 /* We can't send a FIN here */
18543 goto failed;
18545 if (flags & TH_FIN) {
18546 /* We never send a FIN */
18547 flags &= ~TH_FIN;
18549 if (tp->t_flags & TF_RCVD_TSTMP) {
18550 to.to_tsval = ms_cts + tp->ts_offset;
18551 to.to_tsecr = tp->ts_recent;
18552 to.to_flags = TOF_TS;
18554 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
18555 /* TCP-MD5 (RFC2385). */
18556 if (tp->t_flags & TF_SIGNATURE)
18557 to.to_flags |= TOF_SIGNATURE;
18558 #endif
18559 optlen = tcp_addoptions(&to, opt);
18560 hdrlen += optlen;
18561 udp = rack->r_ctl.fsb.udp;
18562 if (udp)
18563 hdrlen += sizeof(struct udphdr);
18564 if (rack->r_ctl.rc_pace_max_segs)
18565 max_val = rack->r_ctl.rc_pace_max_segs;
18566 else if (rack->rc_user_set_max_segs)
18567 max_val = rack->rc_user_set_max_segs * segsiz;
18568 else
18569 max_val = len;
18570 if ((tp->t_flags & TF_TSO) &&
18571 V_tcp_do_tso &&
18572 (len > segsiz) &&
18573 (tp->t_port == 0))
18574 tso = 1;
18575 #ifdef INET6
18576 if (MHLEN < hdrlen + max_linkhdr)
18577 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
18578 else
18579 #endif
18580 m = m_gethdr(M_NOWAIT, MT_DATA);
18581 if (m == NULL)
18582 goto failed;
18583 m->m_data += max_linkhdr;
18584 m->m_len = hdrlen;
18585 th = rack->r_ctl.fsb.th;
18586 /* Establish the len to send */
18587 if (len > max_val)
18588 len = max_val;
18589 if ((tso) && (len + optlen > segsiz)) {
18590 uint32_t if_hw_tsomax;
18591 int32_t max_len;
18593 /* extract TSO information */
18594 if_hw_tsomax = tp->t_tsomax;
18595 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
18596 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
18598 * Check if we should limit by maximum payload
18599 * length:
18601 if (if_hw_tsomax != 0) {
18602 /* compute maximum TSO length */
18603 max_len = (if_hw_tsomax - hdrlen -
18604 max_linkhdr);
18605 if (max_len <= 0) {
18606 goto failed;
18607 } else if (len > max_len) {
18608 len = max_len;
18611 if (len <= segsiz) {
18613 * In case there are too many small fragments don't
18614 * use TSO:
18616 tso = 0;
18618 } else {
18619 tso = 0;
18621 if ((tso == 0) && (len > segsiz))
18622 len = segsiz;
18623 (void)tcp_get_usecs(tv);
18624 if ((len == 0) ||
18625 (len <= MHLEN - hdrlen - max_linkhdr)) {
18626 goto failed;
18628 th->th_seq = htonl(rsm->r_start);
18629 th->th_ack = htonl(tp->rcv_nxt);
18631 * The PUSH bit should only be applied
18632 * if the full retransmission is made. If
18633 * we are sending less, then this is the
18634 * left hand edge and should not have
18635 * the PUSH bit.
18637 if ((rsm->r_flags & RACK_HAD_PUSH) &&
18638 (len == (rsm->r_end - rsm->r_start)))
18639 flags |= TH_PUSH;
18640 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
18641 if (th->th_win == 0) {
18642 tp->t_sndzerowin++;
18643 tp->t_flags |= TF_RXWIN0SENT;
18644 } else
18645 tp->t_flags &= ~TF_RXWIN0SENT;
18646 if (rsm->r_flags & RACK_TLP) {
18648 * TLP should not count in retran count, but
18649 * in its own bin
18651 counter_u64_add(rack_tlp_retran, 1);
18652 counter_u64_add(rack_tlp_retran_bytes, len);
18653 } else {
18654 tp->t_sndrexmitpack++;
18655 KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
18656 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
18658 #ifdef STATS
18659 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
18660 len);
18661 #endif
18662 if (rsm->m == NULL)
18663 goto failed;
18664 if (rsm->m &&
18665 ((rsm->orig_m_len != rsm->m->m_len) ||
18666 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) {
18667 /* Fix up the orig_m_len and possibly the mbuf offset */
18668 rack_adjust_orig_mlen(rsm);
18670 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls);
18671 if (len <= segsiz) {
18673 * Must have run out of mbufs for the copy;
18674 * shorten it to no longer need TSO. Let's
18675 * not put on sendalot since we are low on
18676 * mbufs.
18678 tso = 0;
18680 if ((m->m_next == NULL) || (len <= 0)){
18681 goto failed;
18683 if (udp) {
18684 if (rack->r_is_v6)
18685 ulen = hdrlen + len - sizeof(struct ip6_hdr);
18686 else
18687 ulen = hdrlen + len - sizeof(struct ip);
18688 udp->uh_ulen = htons(ulen);
18690 m->m_pkthdr.rcvif = (struct ifnet *)0;
18691 if (TCPS_HAVERCVDSYN(tp->t_state) &&
18692 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
18693 int ect = tcp_ecn_output_established(tp, &flags, len, true);
18694 if ((tp->t_state == TCPS_SYN_RECEIVED) &&
18695 (tp->t_flags2 & TF2_ECN_SND_ECE))
18696 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
18697 #ifdef INET6
18698 if (rack->r_is_v6) {
18699 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
18700 ip6->ip6_flow |= htonl(ect << 20);
18702 else
18703 #endif
18705 ip->ip_tos &= ~IPTOS_ECN_MASK;
18706 ip->ip_tos |= ect;
18709 if (rack->r_ctl.crte != NULL) {
18710 /* See if we can send via the hw queue */
18711 slot = rack_check_queue_level(rack, tp, tv, cts, len, segsiz);
18712 /* If there is nothing in queue (no pacing time) we can send via the hw queue */
18713 if (slot == 0)
18714 ip_sendflag = 0;
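/*
 * Note: ip_sendflag starts out as IP_NO_SND_TAG_RL and is only
 * cleared when rack_check_queue_level() reports no queueing delay
 * (slot == 0); only then is the retransmit handed to the hardware
 * pacing queue.
 */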
18716 tcp_set_flags(th, flags);
18717 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
18718 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
18719 if (to.to_flags & TOF_SIGNATURE) {
18721 * Calculate MD5 signature and put it into the place
18722 * determined before.
18723 * NOTE: since TCP options buffer doesn't point into
18724 * mbuf's data, calculate offset and use it.
18726 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
18727 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
18729 * Do not send segment if the calculation of MD5
18730 * digest has failed.
18732 goto failed;
18735 #endif
18736 #ifdef INET6
18737 if (rack->r_is_v6) {
18738 if (tp->t_port) {
18739 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
18740 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
18741 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
18742 th->th_sum = htons(0);
18743 UDPSTAT_INC(udps_opackets);
18744 } else {
18745 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
18746 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
18747 th->th_sum = in6_cksum_pseudo(ip6,
18748 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
18752 #endif
18753 #if defined(INET6) && defined(INET)
18754 else
18755 #endif
18756 #ifdef INET
18758 if (tp->t_port) {
18759 m->m_pkthdr.csum_flags = CSUM_UDP;
18760 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
18761 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
18762 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
18763 th->th_sum = htons(0);
18764 UDPSTAT_INC(udps_opackets);
18765 } else {
18766 m->m_pkthdr.csum_flags = CSUM_TCP;
18767 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
18768 th->th_sum = in_pseudo(ip->ip_src.s_addr,
18769 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
18770 IPPROTO_TCP + len + optlen));
18772 /* IP version must be set here for ipv4/ipv6 checking later */
18773 KASSERT(ip->ip_v == IPVERSION,
18774 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
18776 #endif
18777 if (tso) {
18779 * Here we use segsiz since we have no added options besides
18780 * any standard timestamp options (no DSACKs or SACKS are sent
18781 * via either fast-path).
18783 KASSERT(len > segsiz,
18784 ("%s: len <= tso_segsz tp:%p", __func__, tp));
18785 m->m_pkthdr.csum_flags |= CSUM_TSO;
18786 m->m_pkthdr.tso_segsz = segsiz;
18788 #ifdef INET6
18789 if (rack->r_is_v6) {
18790 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
18791 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
18792 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
18793 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
18794 else
18795 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
18797 #endif
18798 #if defined(INET) && defined(INET6)
18799 else
18800 #endif
18801 #ifdef INET
18803 ip->ip_len = htons(m->m_pkthdr.len);
18804 ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
18805 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
18806 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
18807 if (tp->t_port == 0 || len < V_tcp_minmss) {
18808 ip->ip_off |= htons(IP_DF);
18810 } else {
18811 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
18814 #endif
18815 if (doing_tlp == 0) {
18816 /* Set we retransmitted */
18817 rack->rc_gp_saw_rec = 1;
18818 } else {
18819 /* It's a TLP, set CA or SS */
18820 if (tp->snd_cwnd > tp->snd_ssthresh) {
18821 /* Set we sent in CA */
18822 rack->rc_gp_saw_ca = 1;
18823 } else {
18824 /* Set we sent in SS */
18825 rack->rc_gp_saw_ss = 1;
18828 /* Time to copy in our header */
18829 cpto = mtod(m, uint8_t *);
18830 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
18831 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
18832 if (optlen) {
18833 bcopy(opt, th + 1, optlen);
18834 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
18835 } else {
18836 th->th_off = sizeof(struct tcphdr) >> 2;
18838 if (tcp_bblogging_on(rack->rc_tp)) {
18839 union tcp_log_stackspecific log;
18841 if (rsm->r_flags & RACK_RWND_COLLAPSED) {
18842 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm);
18843 counter_u64_add(rack_collapsed_win_rxt, 1);
18844 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start));
18846 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
18847 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
18848 if (rack->rack_no_prr)
18849 log.u_bbr.flex1 = 0;
18850 else
18851 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
18852 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
18853 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
18854 log.u_bbr.flex4 = max_val;
18855 /* Save off the early/late values */
18856 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
18857 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
18858 log.u_bbr.bw_inuse = rack_get_bw(rack);
18859 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw;
18860 if (doing_tlp == 0)
18861 log.u_bbr.flex8 = 1;
18862 else
18863 log.u_bbr.flex8 = 2;
18864 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
18865 log.u_bbr.flex7 = 55;
18866 log.u_bbr.pkts_out = tp->t_maxseg;
18867 log.u_bbr.timeStamp = cts;
18868 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18869 if (rsm->r_rtr_cnt > 0) {
18871 * When we have a retransmit we want to log the
18872 * burst at send and flight at send from before.
18874 log.u_bbr.flex5 = rsm->r_fas;
18875 log.u_bbr.bbr_substate = rsm->r_bas;
18876 } else {
18878 * This is currently unlikely until we do the
18879 * packet pair probes but I will add it for completeness.
18881 log.u_bbr.flex5 = log.u_bbr.inflight;
18882 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz);
18884 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
18885 log.u_bbr.delivered = 0;
18886 log.u_bbr.rttProp = (uintptr_t)rsm;
18887 log.u_bbr.delRate = rsm->r_flags;
18888 log.u_bbr.delRate <<= 31;
18889 log.u_bbr.delRate |= rack->r_must_retran;
18890 log.u_bbr.delRate <<= 1;
18891 log.u_bbr.delRate |= 1;
18892 log.u_bbr.pkt_epoch = __LINE__;
18893 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
18894 len, &log, false, NULL, __func__, __LINE__, tv);
18895 } else
18896 lgb = NULL;
18897 if ((rack->r_ctl.crte != NULL) &&
18898 tcp_bblogging_on(tp)) {
18899 rack_log_queue_level(tp, rack, len, tv, cts);
18901 #ifdef INET6
18902 if (rack->r_is_v6) {
18903 error = ip6_output(m, inp->in6p_outputopts,
18904 &inp->inp_route6,
18905 ip_sendflag, NULL, NULL, inp);
18907 else
18908 #endif
18909 #ifdef INET
18911 error = ip_output(m, NULL,
18912 &inp->inp_route,
18913 ip_sendflag, 0, inp);
18915 #endif
18916 m = NULL;
18917 if (lgb) {
18918 lgb->tlb_errno = error;
18919 lgb = NULL;
18921 /* Move snd_nxt to snd_max so we don't have false retransmissions */
18922 tp->snd_nxt = tp->snd_max;
18923 if (error) {
18924 goto failed;
18925 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) {
18926 rack->rc_hw_nobuf = 0;
18927 rack->r_ctl.rc_agg_delayed = 0;
18928 rack->r_early = 0;
18929 rack->r_late = 0;
18930 rack->r_ctl.rc_agg_early = 0;
18932 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv),
18933 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz);
18934 if (doing_tlp) {
18935 rack->rc_tlp_in_progress = 1;
18936 rack->r_ctl.rc_tlp_cnt_out++;
18938 if (error == 0) {
18939 counter_u64_add(rack_total_bytes, len);
18940 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls);
18941 if (doing_tlp) {
18942 rack->rc_last_sent_tlp_past_cumack = 0;
18943 rack->rc_last_sent_tlp_seq_valid = 1;
18944 rack->r_ctl.last_sent_tlp_seq = rsm->r_start;
18945 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start;
18947 if (rack->r_ctl.rc_prr_sndcnt >= len)
18948 rack->r_ctl.rc_prr_sndcnt -= len;
18949 else
18950 rack->r_ctl.rc_prr_sndcnt = 0;
18952 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
18953 rack->forced_ack = 0; /* If we send something zap the FA flag */
18954 if (IN_FASTRECOVERY(tp->t_flags) && rsm)
18955 rack->r_ctl.retran_during_recovery += len;
18957 int idx;
18959 idx = (len / segsiz) + 3;
18960 if (idx >= TCP_MSS_ACCT_ATIMER)
18961 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
18962 else
18963 counter_u64_add(rack_out_size[idx], 1);
18965 if (tp->t_rtttime == 0) {
18966 tp->t_rtttime = ticks;
18967 tp->t_rtseq = startseq;
18968 KMOD_TCPSTAT_INC(tcps_segstimed);
18970 counter_u64_add(rack_fto_rsm_send, 1);
18971 if (error && (error == ENOBUFS)) {
18972 if (rack->r_ctl.crte != NULL) {
18973 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF);
18974 if (tcp_bblogging_on(rack->rc_tp))
18975 rack_log_queue_level(tp, rack, len, tv, cts);
18976 } else
18977 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF);
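/*
 * Backoff sketch: the pacing slot after an ENOBUFS is at least
 * 10 ms and, once rc_enobuf climbs past 9, grows by 1 ms per
 * consecutive ENOBUFS, topping out near 128 ms when the counter
 * saturates at 0x7f.
 */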
18978 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
18979 if (rack->rc_enobuf < 0x7f)
18980 rack->rc_enobuf++;
18981 if (slot < (10 * HPTS_USEC_IN_MSEC))
18982 slot = 10 * HPTS_USEC_IN_MSEC;
18983 if (rack->r_ctl.crte != NULL) {
18984 counter_u64_add(rack_saw_enobuf_hw, 1);
18985 tcp_rl_log_enobuf(rack->r_ctl.crte);
18987 counter_u64_add(rack_saw_enobuf, 1);
18988 } else {
18989 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz, __LINE__);
18991 rack_start_hpts_timer(rack, tp, cts, slot, len, 0);
18992 #ifdef TCP_ACCOUNTING
18993 crtsc = get_cyclecount();
18994 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18995 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
18997 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18998 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
19000 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19001 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz);
19003 sched_unpin();
19004 #endif
19005 return (0);
19006 failed:
19007 if (m)
19008 m_free(m);
19009 return (-1);
19012 static void
19013 rack_sndbuf_autoscale(struct tcp_rack *rack)
19016 * Automatic sizing of send socket buffer. Often the send buffer
19017 * size is not optimally adjusted to the actual network conditions
19018 * at hand (delay bandwidth product). Setting the buffer size too
19019 * small limits throughput on links with high bandwidth and high
19020 * delay (e.g. trans-continental/oceanic links). Setting the
19021 * buffer size too big consumes too much real kernel memory,
19022 * especially with many connections on busy servers.
19024 * The criteria to step up the send buffer one notch are:
19025 * 1. receive window of remote host is larger than send buffer
19026 * (with a fudge factor of 5/4th);
19027 * 2. send buffer is filled to 7/8th with data (so we actually
19028 * have data to make use of it);
19029 * 3. send buffer fill has not hit maximal automatic size;
19030 * 4. our send window (slow start and congestion controlled) is
19031 * larger than sent but unacknowledged data in send buffer.
19033 * Note that the rack version moves things much faster, since
19034 * we want to avoid hitting cache lines in the rack_fast_output()
19035 * path; this is called much less often and thus moves
19036 * the SB forward by a percentage.
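* A rough worked example (hypothetical numbers): with
* rack_autosndbuf_inc at 20 (percent) and sb_hiwat at 64 kB,
* scaleup is about 12.8 kB (or V_tcp_autosndbuf_inc if that is
* larger), so the buffer is re-reserved at roughly 76.8 kB,
* subject to the V_tcp_autosndbuf_max cap.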
19038 struct socket *so;
19039 struct tcpcb *tp;
19040 uint32_t sendwin, scaleup;
19042 tp = rack->rc_tp;
19043 so = rack->rc_inp->inp_socket;
19044 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd);
19045 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
19046 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
19047 sbused(&so->so_snd) >=
19048 (so->so_snd.sb_hiwat / 8 * 7) &&
19049 sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
19050 sendwin >= (sbused(&so->so_snd) -
19051 (tp->snd_max - tp->snd_una))) {
19052 if (rack_autosndbuf_inc)
19053 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100;
19054 else
19055 scaleup = V_tcp_autosndbuf_inc;
19056 if (scaleup < V_tcp_autosndbuf_inc)
19057 scaleup = V_tcp_autosndbuf_inc;
19058 scaleup += so->so_snd.sb_hiwat;
19059 if (scaleup > V_tcp_autosndbuf_max)
19060 scaleup = V_tcp_autosndbuf_max;
19061 if (!sbreserve_locked(so, SO_SND, scaleup, curthread))
19062 so->so_snd.sb_flags &= ~SB_AUTOSIZE;
19067 static int
19068 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val,
19069 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err)
19072 * Enter to do fast output. We are given that the sched_pin is
19073 * in place (if accounting is compiled in) and the cycle count taken
19074 * at entry is in place in ts_val. The idea here is that
19075 * we know how many more bytes need to be sent (presumably either
19076 * during pacing or to fill the cwnd and that was greater than
19077 * the max-burst). We have how much to send and all the info we
19078 * need to just send.
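* The again: label below lets one call emit several segsiz-sized
* frames while r_ctl.fsb.left_to_send holds out and TSO is not in
* use. On success 0 is returned; the failed label clears
* r_fast_output and returns -1 so the caller drops back to the
* full rack_output() path.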
19080 #ifdef INET
19081 struct ip *ip = NULL;
19082 #endif
19083 struct udphdr *udp = NULL;
19084 struct tcphdr *th = NULL;
19085 struct mbuf *m, *s_mb;
19086 struct inpcb *inp;
19087 uint8_t *cpto;
19088 struct tcp_log_buffer *lgb;
19089 #ifdef TCP_ACCOUNTING
19090 uint64_t crtsc;
19091 #endif
19092 struct tcpopt to;
19093 u_char opt[TCP_MAXOLEN];
19094 uint32_t hdrlen, optlen;
19095 #ifdef TCP_ACCOUNTING
19096 int cnt_thru = 1;
19097 #endif
19098 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0;
19099 uint16_t flags;
19100 uint32_t s_soff;
19101 uint32_t if_hw_tsomaxsegcount = 0, startseq;
19102 uint32_t if_hw_tsomaxsegsize;
19103 uint32_t add_flag = RACK_SENT_FP;
19104 #ifdef INET6
19105 struct ip6_hdr *ip6 = NULL;
19107 if (rack->r_is_v6) {
19108 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
19109 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
19110 } else
19111 #endif /* INET6 */
19113 #ifdef INET
19114 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
19115 hdrlen = sizeof(struct tcpiphdr);
19116 #endif
19118 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
19119 m = NULL;
19120 goto failed;
19122 rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
19123 startseq = tp->snd_max;
19124 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
19125 inp = rack->rc_inp;
19126 len = rack->r_ctl.fsb.left_to_send;
19127 to.to_flags = 0;
19128 flags = rack->r_ctl.fsb.tcp_flags;
19129 if (tp->t_flags & TF_RCVD_TSTMP) {
19130 to.to_tsval = ms_cts + tp->ts_offset;
19131 to.to_tsecr = tp->ts_recent;
19132 to.to_flags = TOF_TS;
19134 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
19135 /* TCP-MD5 (RFC2385). */
19136 if (tp->t_flags & TF_SIGNATURE)
19137 to.to_flags |= TOF_SIGNATURE;
19138 #endif
19139 optlen = tcp_addoptions(&to, opt);
19140 hdrlen += optlen;
19141 udp = rack->r_ctl.fsb.udp;
19142 if (udp)
19143 hdrlen += sizeof(struct udphdr);
19144 if (rack->r_ctl.rc_pace_max_segs)
19145 max_val = rack->r_ctl.rc_pace_max_segs;
19146 else if (rack->rc_user_set_max_segs)
19147 max_val = rack->rc_user_set_max_segs * segsiz;
19148 else
19149 max_val = len;
19150 if ((tp->t_flags & TF_TSO) &&
19151 V_tcp_do_tso &&
19152 (len > segsiz) &&
19153 (tp->t_port == 0))
19154 tso = 1;
19155 again:
19156 #ifdef INET6
19157 if (MHLEN < hdrlen + max_linkhdr)
19158 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
19159 else
19160 #endif
19161 m = m_gethdr(M_NOWAIT, MT_DATA);
19162 if (m == NULL)
19163 goto failed;
19164 m->m_data += max_linkhdr;
19165 m->m_len = hdrlen;
19166 th = rack->r_ctl.fsb.th;
19167 /* Establish the len to send */
19168 if (len > max_val)
19169 len = max_val;
19170 if ((tso) && (len + optlen > segsiz)) {
19171 uint32_t if_hw_tsomax;
19172 int32_t max_len;
19174 /* extract TSO information */
19175 if_hw_tsomax = tp->t_tsomax;
19176 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
19177 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
19179 * Check if we should limit by maximum payload
19180 * length:
19182 if (if_hw_tsomax != 0) {
19183 /* compute maximum TSO length */
19184 max_len = (if_hw_tsomax - hdrlen -
19185 max_linkhdr);
19186 if (max_len <= 0) {
19187 goto failed;
19188 } else if (len > max_len) {
19189 len = max_len;
19192 if (len <= segsiz) {
19194 * In case there are too many small fragments don't
19195 * use TSO:
19197 tso = 0;
19199 } else {
19200 tso = 0;
19202 if ((tso == 0) && (len > segsiz))
19203 len = segsiz;
19204 (void)tcp_get_usecs(tv);
19205 if ((len == 0) ||
19206 (len <= MHLEN - hdrlen - max_linkhdr)) {
19207 goto failed;
19209 sb_offset = tp->snd_max - tp->snd_una;
19210 th->th_seq = htonl(tp->snd_max);
19211 th->th_ack = htonl(tp->rcv_nxt);
19212 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
19213 if (th->th_win == 0) {
19214 tp->t_sndzerowin++;
19215 tp->t_flags |= TF_RXWIN0SENT;
19216 } else
19217 tp->t_flags &= ~TF_RXWIN0SENT;
19218 tp->snd_up = tp->snd_una; /* drag it along, it's deprecated */
19219 KMOD_TCPSTAT_INC(tcps_sndpack);
19220 KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
19221 #ifdef STATS
19222 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
19223 len);
19224 #endif
19225 if (rack->r_ctl.fsb.m == NULL)
19226 goto failed;
19228 /* s_mb and s_soff are saved for rack_log_output */
19229 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize,
19230 &s_mb, &s_soff);
19231 if (len <= segsiz) {
19233 * Must have run out of mbufs for the copy;
19234 * shorten it to no longer need TSO. Let's
19235 * not put on sendalot since we are low on
19236 * mbufs.
19238 tso = 0;
19240 if (rack->r_ctl.fsb.rfo_apply_push &&
19241 (len == rack->r_ctl.fsb.left_to_send)) {
19242 tcp_set_flags(th, flags | TH_PUSH);
19243 add_flag |= RACK_HAD_PUSH;
19245 if ((m->m_next == NULL) || (len <= 0)){
19246 goto failed;
19248 if (udp) {
19249 if (rack->r_is_v6)
19250 ulen = hdrlen + len - sizeof(struct ip6_hdr);
19251 else
19252 ulen = hdrlen + len - sizeof(struct ip);
19253 udp->uh_ulen = htons(ulen);
19255 m->m_pkthdr.rcvif = (struct ifnet *)0;
19256 if (TCPS_HAVERCVDSYN(tp->t_state) &&
19257 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
19258 int ect = tcp_ecn_output_established(tp, &flags, len, false);
19259 if ((tp->t_state == TCPS_SYN_RECEIVED) &&
19260 (tp->t_flags2 & TF2_ECN_SND_ECE))
19261 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
19262 #ifdef INET6
19263 if (rack->r_is_v6) {
19264 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
19265 ip6->ip6_flow |= htonl(ect << 20);
19267 else
19268 #endif
19270 #ifdef INET
19271 ip->ip_tos &= ~IPTOS_ECN_MASK;
19272 ip->ip_tos |= ect;
19273 #endif
19276 tcp_set_flags(th, flags);
19277 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
19278 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
19279 if (to.to_flags & TOF_SIGNATURE) {
19281 * Calculate MD5 signature and put it into the place
19282 * determined before.
19283 * NOTE: since TCP options buffer doesn't point into
19284 * mbuf's data, calculate offset and use it.
19286 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
19287 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
19289 * Do not send segment if the calculation of MD5
19290 * digest has failed.
19292 goto failed;
19295 #endif
19296 #ifdef INET6
19297 if (rack->r_is_v6) {
19298 if (tp->t_port) {
19299 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
19300 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
19301 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
19302 th->th_sum = htons(0);
19303 UDPSTAT_INC(udps_opackets);
19304 } else {
19305 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
19306 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
19307 th->th_sum = in6_cksum_pseudo(ip6,
19308 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
19312 #endif
19313 #if defined(INET6) && defined(INET)
19314 else
19315 #endif
19316 #ifdef INET
19318 if (tp->t_port) {
19319 m->m_pkthdr.csum_flags = CSUM_UDP;
19320 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
19321 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
19322 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
19323 th->th_sum = htons(0);
19324 UDPSTAT_INC(udps_opackets);
19325 } else {
19326 m->m_pkthdr.csum_flags = CSUM_TCP;
19327 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
19328 th->th_sum = in_pseudo(ip->ip_src.s_addr,
19329 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
19330 IPPROTO_TCP + len + optlen));
19332 /* IP version must be set here for ipv4/ipv6 checking later */
19333 KASSERT(ip->ip_v == IPVERSION,
19334 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
19336 #endif
19337 if (tso) {
19339 * Here we use segsiz since we have no added options besides
19340 * any standard timestamp options (no DSACKs or SACKS are sent
19341 * via either fast-path).
19343 KASSERT(len > segsiz,
19344 ("%s: len <= tso_segsz tp:%p", __func__, tp));
19345 m->m_pkthdr.csum_flags |= CSUM_TSO;
19346 m->m_pkthdr.tso_segsz = segsiz;
19348 #ifdef INET6
19349 if (rack->r_is_v6) {
19350 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
19351 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
19352 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
19353 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
19354 else
19355 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
19357 #endif
19358 #if defined(INET) && defined(INET6)
19359 else
19360 #endif
19361 #ifdef INET
19363 ip->ip_len = htons(m->m_pkthdr.len);
19364 ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
19365 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
19366 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
19367 if (tp->t_port == 0 || len < V_tcp_minmss) {
19368 ip->ip_off |= htons(IP_DF);
19370 } else {
19371 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
19374 #endif
19375 if (tp->snd_cwnd > tp->snd_ssthresh) {
19376 /* Set we sent in CA */
19377 rack->rc_gp_saw_ca = 1;
19378 } else {
19379 /* Set we sent in SS */
19380 rack->rc_gp_saw_ss = 1;
19382 /* Time to copy in our header */
19383 cpto = mtod(m, uint8_t *);
19384 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
19385 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
19386 if (optlen) {
19387 bcopy(opt, th + 1, optlen);
19388 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
19389 } else {
19390 th->th_off = sizeof(struct tcphdr) >> 2;
19392 if ((rack->r_ctl.crte != NULL) &&
19393 tcp_bblogging_on(tp)) {
19394 rack_log_queue_level(tp, rack, len, tv, cts);
19396 if (tcp_bblogging_on(rack->rc_tp)) {
19397 union tcp_log_stackspecific log;
19399 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
19400 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
19401 if (rack->rack_no_prr)
19402 log.u_bbr.flex1 = 0;
19403 else
19404 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
19405 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
19406 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
19407 log.u_bbr.flex4 = max_val;
19408 /* Save off the early/late values */
19409 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
19410 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
19411 log.u_bbr.bw_inuse = rack_get_bw(rack);
19412 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw;
19413 log.u_bbr.flex8 = 0;
19414 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
19415 log.u_bbr.flex7 = 44;
19416 log.u_bbr.pkts_out = tp->t_maxseg;
19417 log.u_bbr.timeStamp = cts;
19418 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
19419 log.u_bbr.flex5 = log.u_bbr.inflight;
19420 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
19421 log.u_bbr.delivered = 0;
19422 log.u_bbr.rttProp = 0;
19423 log.u_bbr.delRate = rack->r_must_retran;
19424 log.u_bbr.delRate <<= 1;
19425 log.u_bbr.pkt_epoch = __LINE__;
19426 /* For fast output there are no retransmits, so just log inflight and how many MSS we send */
19427 log.u_bbr.flex5 = log.u_bbr.inflight;
19428 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz);
19429 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
19430 len, &log, false, NULL, __func__, __LINE__, tv);
19431 } else
19432 lgb = NULL;
19433 #ifdef INET6
19434 if (rack->r_is_v6) {
19435 error = ip6_output(m, inp->in6p_outputopts,
19436 &inp->inp_route6,
19437 0, NULL, NULL, inp);
19439 #endif
19440 #if defined(INET) && defined(INET6)
19441 else
19442 #endif
19443 #ifdef INET
19445 error = ip_output(m, NULL,
19446 &inp->inp_route,
19447 0, 0, inp);
19449 #endif
19450 if (lgb) {
19451 lgb->tlb_errno = error;
19452 lgb = NULL;
19454 if (error) {
19455 *send_err = error;
19456 m = NULL;
19457 goto failed;
19458 } else if (rack->rc_hw_nobuf) {
19459 rack->rc_hw_nobuf = 0;
19460 rack->r_ctl.rc_agg_delayed = 0;
19461 rack->r_early = 0;
19462 rack->r_late = 0;
19463 rack->r_ctl.rc_agg_early = 0;
19465 if ((error == 0) && (rack->lt_bw_up == 0)) {
19466 /* Unlikely */
19467 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv);
19468 rack->r_ctl.lt_seq = tp->snd_una;
19469 rack->lt_bw_up = 1;
19470 } else if ((error == 0) &&
19471 (((tp->snd_max + len) - rack->r_ctl.lt_seq) > 0x7fffffff)) {
19473 * Need to record what we have since we are
19474 * approaching seq wrap.
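* The long-term b/w sample is kept as a byte count relative to lt_seq
* plus an accumulated time; folding it in before the distance reaches
* 0x7fffffff keeps the unsigned sequence arithmetic from wrapping.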
19476 struct timeval tv;
19477 uint64_t tmark;
19479 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq);
19480 rack->r_ctl.lt_seq = tp->snd_una;
19481 tmark = tcp_get_u64_usecs(&tv);
19482 if (tmark > rack->r_ctl.lt_timemark) {
19483 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark);
19484 rack->r_ctl.lt_timemark = tmark;
19487 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv),
19488 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz);
19489 if (tp->snd_una == tp->snd_max) {
19490 rack->r_ctl.rc_tlp_rxt_last_time = cts;
19491 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
19492 tp->t_acktime = ticks;
19494 counter_u64_add(rack_total_bytes, len);
19495 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls);
19497 rack->forced_ack = 0; /* If we send something zap the FA flag */
19498 tot_len += len;
19499 if ((tp->t_flags & TF_GPUTINPROG) == 0)
19500 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset);
19501 tp->snd_max += len;
19502 tp->snd_nxt = tp->snd_max;
19503 if (rack->rc_new_rnd_needed) {
19504 rack_new_round_starts(tp, rack, tp->snd_max);
19507 int idx;
19509 idx = (len / segsiz) + 3;
19510 if (idx >= TCP_MSS_ACCT_ATIMER)
19511 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
19512 else
19513 counter_u64_add(rack_out_size[idx], 1);
19515 if (len <= rack->r_ctl.fsb.left_to_send)
19516 rack->r_ctl.fsb.left_to_send -= len;
19517 else
19518 rack->r_ctl.fsb.left_to_send = 0;
19519 if (rack->r_ctl.fsb.left_to_send < segsiz) {
19520 rack->r_fast_output = 0;
19521 rack->r_ctl.fsb.left_to_send = 0;
19522 /* At the end of fast_output scale up the sb */
19523 SOCK_SENDBUF_LOCK(rack->rc_inp->inp_socket);
19524 rack_sndbuf_autoscale(rack);
19525 SOCK_SENDBUF_UNLOCK(rack->rc_inp->inp_socket);
19527 if (tp->t_rtttime == 0) {
19528 tp->t_rtttime = ticks;
19529 tp->t_rtseq = startseq;
19530 KMOD_TCPSTAT_INC(tcps_segstimed);
19532 if ((rack->r_ctl.fsb.left_to_send >= segsiz) &&
19533 (max_val > len) &&
19534 (tso == 0)) {
19535 max_val -= len;
19536 len = segsiz;
19537 th = rack->r_ctl.fsb.th;
19538 #ifdef TCP_ACCOUNTING
19539 cnt_thru++;
19540 #endif
19541 goto again;
19543 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
19544 counter_u64_add(rack_fto_send, 1);
19545 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz, __LINE__);
19546 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0);
19547 #ifdef TCP_ACCOUNTING
19548 crtsc = get_cyclecount();
19549 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19550 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
19552 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19553 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
19555 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19556 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz);
19558 sched_unpin();
19559 #endif
19560 return (0);
19561 failed:
19562 if (m)
19563 m_free(m);
19564 rack->r_fast_output = 0;
19565 return (-1);
19568 static inline void
19569 rack_setup_fast_output(struct tcpcb *tp, struct tcp_rack *rack,
19570 struct sockbuf *sb,
19571 int len, int orig_len, int segsiz, uint32_t pace_max_seg,
19572 bool hw_tls,
19573 uint16_t flags)
19575 rack->r_fast_output = 1;
19576 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
19577 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
19578 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m);
19579 rack->r_ctl.fsb.tcp_flags = flags;
19580 rack->r_ctl.fsb.left_to_send = orig_len - len;
19581 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) {
19582 /* Less than a full-sized pace, let's not */
19583 rack->r_fast_output = 0;
19584 return;
19585 } else {
19586 /* Round down to the nearest pace_max_seg */
19587 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg);
19589 if (hw_tls)
19590 rack->r_ctl.fsb.hw_tls = 1;
19591 else
19592 rack->r_ctl.fsb.hw_tls = 0;
19593 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
19594 ("rack:%p left_to_send:%u sbavail:%u out:%u",
19595 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
19596 (tp->snd_max - tp->snd_una)));
19597 if (rack->r_ctl.fsb.left_to_send < segsiz)
19598 rack->r_fast_output = 0;
19599 else {
19600 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
19601 rack->r_ctl.fsb.rfo_apply_push = 1;
19602 else
19603 rack->r_ctl.fsb.rfo_apply_push = 0;
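/*
 * Convert the HPTS minimum sleep time into a byte count at the current
 * goodput estimate (gp_bw), rounded up to a whole segment. This is used
 * as the per-send cap when shaping retransmits to the pacing minimum.
 */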
19607 static uint32_t
19608 rack_get_hpts_pacing_min_for_bw(struct tcp_rack *rack, int32_t segsiz)
19610 uint64_t min_time;
19611 uint32_t maxlen;
19613 min_time = (uint64_t)get_hpts_min_sleep_time();
19614 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC);
19615 maxlen = roundup(maxlen, segsiz);
19616 return (maxlen);
19619 static struct rack_sendmap *
19620 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts)
19622 struct rack_sendmap *rsm = NULL;
19623 int thresh;
19625 restart:
19626 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point);
19627 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) {
19628 /* Nothing there (strange), turn off validity */
19629 rack->r_collapse_point_valid = 0;
19630 return (NULL);
19632 /* Can we send it yet? */
19633 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) {
19635 * Receiver window has not grown enough for
19636 * the segment to be put on the wire.
19638 return (NULL);
19640 if (rsm->r_flags & RACK_ACKED) {
19642 * It has been sacked, let's move to the
19643 * next one if possible.
19645 rack->r_ctl.last_collapse_point = rsm->r_end;
19646 /* Are we done? */
19647 if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
19648 rack->r_ctl.high_collapse_point)) {
19649 rack->r_collapse_point_valid = 0;
19650 return (NULL);
19652 goto restart;
19654 /* Now has it been long enough ? */
19655 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts, __LINE__, 1);
19656 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) {
19657 rack_log_collapse(rack, rsm->r_start,
19658 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
19659 thresh, __LINE__, 6, rsm->r_flags, rsm);
19660 return (rsm);
19662 /* Not enough time */
19663 rack_log_collapse(rack, rsm->r_start,
19664 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
19665 thresh, __LINE__, 7, rsm->r_flags, rsm);
19666 return (NULL);
19669 static inline void
19670 rack_validate_sizes(struct tcp_rack *rack, int32_t *len, int32_t segsiz, uint32_t pace_max_seg)
19672 if ((rack->full_size_rxt == 0) &&
19673 (rack->shape_rxt_to_pacing_min == 0) &&
19674 (*len >= segsiz)) {
19675 *len = segsiz;
19676 } else if (rack->shape_rxt_to_pacing_min &&
19677 rack->gp_ready) {
19678 /* We use pacing min as shaping len req */
19679 uint32_t maxlen;
19681 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz);
19682 if (*len > maxlen)
19683 *len = maxlen;
19684 } else {
19686 * The else case is that full_size_rxt is on, so send it all.
19687 * Note we do need to check this for exceeding
19688 * our max segment size, due to the fact that
19689 * we do sometimes merge chunks together, i.e.
19690 * we cannot just assume that we will never have
19691 * a chunk greater than pace_max_seg.
19693 if (*len > pace_max_seg)
19694 *len = pace_max_seg;
19698 static int
19699 rack_output(struct tcpcb *tp)
19701 struct socket *so;
19702 uint32_t recwin;
19703 uint32_t sb_offset, s_moff = 0;
19704 int32_t len, error = 0;
19705 uint16_t flags;
19706 struct mbuf *m, *s_mb = NULL;
19707 struct mbuf *mb;
19708 uint32_t if_hw_tsomaxsegcount = 0;
19709 uint32_t if_hw_tsomaxsegsize;
19710 int32_t segsiz, minseg;
19711 long tot_len_this_send = 0;
19712 #ifdef INET
19713 struct ip *ip = NULL;
19714 #endif
19715 struct udphdr *udp = NULL;
19716 struct tcp_rack *rack;
19717 struct tcphdr *th;
19718 uint8_t pass = 0;
19719 uint8_t mark = 0;
19720 uint8_t check_done = 0;
19721 uint8_t wanted_cookie = 0;
19722 u_char opt[TCP_MAXOLEN];
19723 unsigned ipoptlen, optlen, hdrlen, ulen=0;
19724 uint32_t rack_seq;
19726 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
19727 unsigned ipsec_optlen = 0;
19729 #endif
19730 int32_t idle, sendalot;
19731 uint32_t tot_idle;
19732 int32_t sub_from_prr = 0;
19733 volatile int32_t sack_rxmit;
19734 struct rack_sendmap *rsm = NULL;
19735 int32_t tso, mtu;
19736 struct tcpopt to;
19737 int32_t slot = 0;
19738 int32_t sup_rack = 0;
19739 uint32_t cts, ms_cts, delayed, early;
19740 uint32_t add_flag = RACK_SENT_SP;
19741 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */
19742 uint8_t doing_tlp = 0;
19743 uint32_t cwnd_to_use, pace_max_seg;
19744 int32_t do_a_prefetch = 0;
19745 int32_t prefetch_rsm = 0;
19746 int32_t orig_len = 0;
19747 struct timeval tv;
19748 int32_t prefetch_so_done = 0;
19749 struct tcp_log_buffer *lgb;
19750 struct inpcb *inp = tptoinpcb(tp);
19751 struct sockbuf *sb;
19752 uint64_t ts_val = 0;
19753 #ifdef TCP_ACCOUNTING
19754 uint64_t crtsc;
19755 #endif
19756 #ifdef INET6
19757 struct ip6_hdr *ip6 = NULL;
19758 int32_t isipv6;
19759 #endif
19760 bool hpts_calling, hw_tls = false;
19762 NET_EPOCH_ASSERT();
19763 INP_WLOCK_ASSERT(inp);
19765 /* setup and take the cache hits here */
19766 rack = (struct tcp_rack *)tp->t_fb_ptr;
19767 #ifdef TCP_ACCOUNTING
19768 sched_pin();
19769 ts_val = get_cyclecount();
19770 #endif
19771 hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS);
19772 tp->t_flags2 &= ~TF2_HPTS_CALLS;
19773 #ifdef TCP_OFFLOAD
19774 if (tp->t_flags & TF_TOE) {
19775 #ifdef TCP_ACCOUNTING
19776 sched_unpin();
19777 #endif
19778 return (tcp_offload_output(tp));
19780 #endif
19781 if (rack->rack_deferred_inited == 0) {
19783 * If we are the connecting socket we will
19784 * hit rack_init() when no sequence numbers
19785 * are set up. This makes it so we must defer
19786 * some initialization. Call that now.
19788 rack_deferred_init(tp, rack);
19791 * For TFO connections in SYN_RECEIVED, only allow the initial
19792 * SYN|ACK and those sent by the retransmit timer.
19794 if ((tp->t_flags & TF_FASTOPEN) &&
19795 (tp->t_state == TCPS_SYN_RECEIVED) &&
19796 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */
19797 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */
19798 #ifdef TCP_ACCOUNTING
19799 sched_unpin();
19800 #endif
19801 return (0);
19803 #ifdef INET6
19804 if (rack->r_state) {
19805 /* Use the cache line loaded if possible */
19806 isipv6 = rack->r_is_v6;
19807 } else {
19808 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0;
19810 #endif
19811 early = 0;
19812 cts = tcp_get_usecs(&tv);
19813 ms_cts = tcp_tv_to_mssectick(&tv);
19814 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
19815 tcp_in_hpts(rack->rc_tp)) {
19817 * We are on the hpts for some timer but not hptsi output.
19818 * Remove from the hpts unconditionally.
19820 rack_timer_cancel(tp, rack, cts, __LINE__);
19822 /* Are we pacing and late? */
19823 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
19824 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) {
19825 /* We are delayed */
19826 delayed = cts - rack->r_ctl.rc_last_output_to;
19827 } else {
19828 delayed = 0;
19830 /* Do the timers, which may override the pacer */
19831 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
19832 int retval;
19834 retval = rack_process_timers(tp, rack, cts, hpts_calling,
19835 &doing_tlp);
19836 if (retval != 0) {
19837 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1);
19838 #ifdef TCP_ACCOUNTING
19839 sched_unpin();
19840 #endif
19842 * If timers want tcp_drop(), then pass error out,
19843 * otherwise suppress it.
19845 return (retval < 0 ? retval : 0);
19848 if (rack->rc_in_persist) {
19849 if (tcp_in_hpts(rack->rc_tp) == 0) {
19850 /* Timer is not running */
19851 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
19853 #ifdef TCP_ACCOUNTING
19854 sched_unpin();
19855 #endif
19856 return (0);
19858 if ((rack->rc_ack_required == 1) &&
19859 (rack->r_timer_override == 0)){
19860 /* A timeout occurred and no ack has arrived */
19861 if (tcp_in_hpts(rack->rc_tp) == 0) {
19862 /* Timer is not running */
19863 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
19865 #ifdef TCP_ACCOUNTING
19866 sched_unpin();
19867 #endif
19868 return (0);
19870 if ((rack->r_timer_override) ||
19871 (rack->rc_ack_can_sendout_data) ||
19872 (delayed) ||
19873 (tp->t_state < TCPS_ESTABLISHED)) {
19874 rack->rc_ack_can_sendout_data = 0;
19875 if (tcp_in_hpts(rack->rc_tp))
19876 tcp_hpts_remove(rack->rc_tp);
19877 } else if (tcp_in_hpts(rack->rc_tp)) {
19879 * On the hpts you can't pass even if ACKNOW is on; we will
19880 * send when the hpts fires.
19882 #ifdef TCP_ACCOUNTING
19883 crtsc = get_cyclecount();
19884 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19885 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val);
19887 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19888 tp->tcp_cnt_counters[SND_BLOCKED]++;
19890 sched_unpin();
19891 #endif
19892 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1);
19893 return (0);
19895 /* Finish out both pacing early and late accounting */
19896 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
19897 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) {
19898 early = rack->r_ctl.rc_last_output_to - cts;
19899 } else
19900 early = 0;
19901 if (delayed && (rack->rc_always_pace == 1)) {
19902 rack->r_ctl.rc_agg_delayed += delayed;
19903 rack->r_late = 1;
19904 } else if (early && (rack->rc_always_pace == 1)) {
19905 rack->r_ctl.rc_agg_early += early;
19906 rack->r_early = 1;
19907 } else if (rack->rc_always_pace == 0) {
19908 /* Non-paced we are not late */
19909 rack->r_ctl.rc_agg_delayed = rack->r_ctl.rc_agg_early = 0;
19910 rack->r_early = rack->r_late = 0;
19912 /* Now that early/late accounting is done turn off the flag */
19913 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
19914 rack->r_wanted_output = 0;
19915 rack->r_timer_override = 0;
19916 if ((tp->t_state != rack->r_state) &&
19917 TCPS_HAVEESTABLISHED(tp->t_state)) {
19918 rack_set_state(tp, rack);
19920 if ((rack->r_fast_output) &&
19921 (doing_tlp == 0) &&
19922 (tp->rcv_numsacks == 0)) {
19923 int ret;
19925 error = 0;
19926 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
19927 if (ret >= 0)
19928 return(ret);
19929 else if (error) {
19930 inp = rack->rc_inp;
19931 so = inp->inp_socket;
19932 sb = &so->so_snd;
19933 goto nomore;
19936 inp = rack->rc_inp;
19938 * For TFO connections in SYN_SENT or SYN_RECEIVED,
19939 * only allow the initial SYN or SYN|ACK and those sent
19940 * by the retransmit timer.
19942 if ((tp->t_flags & TF_FASTOPEN) &&
19943 ((tp->t_state == TCPS_SYN_RECEIVED) ||
19944 (tp->t_state == TCPS_SYN_SENT)) &&
19945 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */
19946 (tp->t_rxtshift == 0)) { /* not a retransmit */
19947 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
19948 #ifdef TCP_ACCOUNTING
19949 sched_unpin();
19950 #endif
19951 return (0);
19954 * Determine length of data that should be transmitted, and flags
19955 * that will be used. If there is some data or critical controls
19956 * (SYN, RST) to send, then transmit; otherwise, investigate
19957 * further.
19959 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
19960 if (tp->t_idle_reduce) {
19961 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur))
19962 rack_cc_after_idle(rack, tp);
19964 tp->t_flags &= ~TF_LASTIDLE;
19965 if (idle) {
19966 if (tp->t_flags & TF_MORETOCOME) {
19967 tp->t_flags |= TF_LASTIDLE;
19968 idle = 0;
19971 if ((tp->snd_una == tp->snd_max) &&
19972 rack->r_ctl.rc_went_idle_time &&
19973 (cts > rack->r_ctl.rc_went_idle_time)) {
19974 tot_idle = (cts - rack->r_ctl.rc_went_idle_time);
19975 if (tot_idle > rack_min_probertt_hold) {
19976 /* Count as a probe rtt */
19977 if (rack->in_probe_rtt == 0) {
19978 rack->r_ctl.rc_lower_rtt_us_cts = cts;
19979 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
19980 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
19981 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
19982 } else {
19983 rack_exit_probertt(rack, cts);
19986 } else
19987 tot_idle = 0;
19988 if (rack_use_fsb &&
19989 (rack->r_ctl.fsb.tcp_ip_hdr) &&
19990 (rack->r_fsb_inited == 0) &&
19991 (rack->r_state != TCPS_CLOSED))
19992 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]);
19993 if (rack->rc_sendvars_notset == 1) {
19994 rack->rc_sendvars_notset = 0;
19996 * Make sure any TCP timers (keep-alive) are not running.
19998 tcp_timer_stop(tp);
20000 if ((rack->rack_no_prr == 1) &&
20001 (rack->rc_always_pace == 0)) {
20003 * Sanity check before sending: if we have
20004 * no-pacing enabled and PRR is turned off, that
20005 * is a logistics error. Correct this by turning
20006 * prr back on. A user *must* set some form of
20007 * pacing in order to turn PRR off. We do this
20008 * in the output path so that we can avoid socket
20009 * option ordering issues that would occur if we
20010 * tried to do it while setting rack_no_prr on.
20012 rack->rack_no_prr = 0;
20014 if ((rack->pcm_enabled == 1) &&
20015 (rack->pcm_needed == 0) &&
20016 (tot_idle > 0)) {
20018 * We have been idle for some microseconds. We need
20019 * to factor this in to see if a PCM is needed.
20021 uint32_t rtts_idle, rnds;
20023 if (tp->t_srtt)
20024 rtts_idle = tot_idle / tp->t_srtt;
20025 else
20026 rtts_idle = 0;
20027 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round;
20028 rack->r_ctl.pcm_idle_rounds += rtts_idle;
20029 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) {
20030 rack->pcm_needed = 1;
20031 rack_log_pcm(rack, 8, rack->r_ctl.last_pcm_round, rtts_idle, rack->r_ctl.current_round );
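/*
 * In other words, idle time is converted into "virtual" rounds by
 * dividing by the smoothed RTT; once the real rounds since the last
 * PCM plus the accumulated idle rounds reach rack_pcm_every_n_rounds,
 * a PCM probe is flagged as needed.
 */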
20034 again:
20035 sendalot = 0;
20036 cts = tcp_get_usecs(&tv);
20037 ms_cts = tcp_tv_to_mssectick(&tv);
20038 tso = 0;
20039 mtu = 0;
20040 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
20041 minseg = segsiz;
20042 if (rack->r_ctl.rc_pace_max_segs == 0)
20043 pace_max_seg = rack->rc_user_set_max_segs * segsiz;
20044 else
20045 pace_max_seg = rack->r_ctl.rc_pace_max_segs;
20046 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
20047 (rack->r_ctl.pcm_max_seg == 0)) {
20049 * We set this on our first send so we know that ctf_fixed_maxseg()
20050 * has been fully set. If we do it in rack_init() we most likely
20051 * see 512 bytes, so we end up at 5120, which is not desirable.
20053 rack->r_ctl.pcm_max_seg = rc_init_window(rack);
20054 if (rack->r_ctl.pcm_max_seg < (ctf_fixed_maxseg(tp) * 10)) {
20056 * Assure our initial PCM probe is at least 10 MSS.
20058 rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10;
20061 if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) {
20062 uint32_t rw_avail, cwa;
20064 if (tp->snd_wnd > ctf_outstanding(tp))
20065 rw_avail = tp->snd_wnd - ctf_outstanding(tp);
20066 else
20067 rw_avail = 0;
20068 if (tp->snd_cwnd > ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked))
20069 cwa = tp->snd_cwnd -ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
20070 else
20071 cwa = 0;
20072 if ((cwa >= rack->r_ctl.pcm_max_seg) &&
20073 (rw_avail > rack->r_ctl.pcm_max_seg)) {
20074 /* Raise up the max seg for this trip through */
20075 pace_max_seg = rack->r_ctl.pcm_max_seg;
20076 /* Disable any fast output */
20077 rack->r_fast_output = 0;
20079 if (rack_verbose_logging) {
20080 rack_log_pcm(rack, 4,
20081 cwa, rack->r_ctl.pcm_max_seg, rw_avail);
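/*
 * Note that pace_max_seg is only raised to pcm_max_seg for this pass
 * when both the cwnd headroom (cwa) and the peer's window (rw_avail)
 * can absorb a full pcm_max_seg burst; otherwise the probe simply
 * waits for a later send opportunity.
 */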
20084 sb_offset = tp->snd_max - tp->snd_una;
20085 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
20086 flags = tcp_outflags[tp->t_state];
20087 while (rack->rc_free_cnt < rack_free_cache) {
20088 rsm = rack_alloc(rack);
20089 if (rsm == NULL) {
20090 if (hpts_calling)
20091 /* Retry in a ms */
20092 slot = (1 * HPTS_USEC_IN_MSEC);
20093 so = inp->inp_socket;
20094 sb = &so->so_snd;
20095 goto just_return_nolock;
20097 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
20098 rack->rc_free_cnt++;
20099 rsm = NULL;
20101 sack_rxmit = 0;
20102 len = 0;
20103 rsm = NULL;
20104 if (flags & TH_RST) {
20105 SOCK_SENDBUF_LOCK(inp->inp_socket);
20106 so = inp->inp_socket;
20107 sb = &so->so_snd;
20108 goto send;
20110 if (rack->r_ctl.rc_resend) {
20111 /* Retransmit timer */
20112 rsm = rack->r_ctl.rc_resend;
20113 rack->r_ctl.rc_resend = NULL;
20114 len = rsm->r_end - rsm->r_start;
20115 sack_rxmit = 1;
20116 sendalot = 0;
20117 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
20118 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
20119 __func__, __LINE__,
20120 rsm->r_start, tp->snd_una, tp, rack, rsm));
20121 sb_offset = rsm->r_start - tp->snd_una;
20122 rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
20123 } else if (rack->r_collapse_point_valid &&
20124 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) {
20126 * If an RSM is returned then enough time has passed
20127 * for us to retransmit it. Move up the collapse point,
20128 * since this rsm has its chance to retransmit now.
20130 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT);
20131 rack->r_ctl.last_collapse_point = rsm->r_end;
20132 /* Are we done? */
20133 if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
20134 rack->r_ctl.high_collapse_point))
20135 rack->r_collapse_point_valid = 0;
20136 sack_rxmit = 1;
20137 /* We are not doing a TLP */
20138 doing_tlp = 0;
20139 len = rsm->r_end - rsm->r_start;
20140 sb_offset = rsm->r_start - tp->snd_una;
20141 sendalot = 0;
20142 rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
20143 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) {
20144 /* We have a retransmit that takes precedence */
20145 if ((!IN_FASTRECOVERY(tp->t_flags)) &&
20146 ((rsm->r_flags & RACK_MUST_RXT) == 0) &&
20147 ((tp->t_flags & TF_WASFRECOVERY) == 0)) {
20148 /* Enter recovery if not induced by a time-out */
20149 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
20151 #ifdef INVARIANTS
20152 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
20153 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
20154 tp, rack, rsm, rsm->r_start, tp->snd_una);
20156 #endif
20157 len = rsm->r_end - rsm->r_start;
20158 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
20159 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
20160 __func__, __LINE__,
20161 rsm->r_start, tp->snd_una, tp, rack, rsm));
20162 sb_offset = rsm->r_start - tp->snd_una;
20163 sendalot = 0;
20164 rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
20165 if (len > 0) {
20166 sack_rxmit = 1;
20167 KMOD_TCPSTAT_INC(tcps_sack_rexmits);
20168 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
20169 min(len, segsiz));
20171 } else if (rack->r_ctl.rc_tlpsend) {
20172 /* Tail loss probe */
20173 long cwin;
20174 long tlen;
20177 * Check if we can do a TLP with a RACK'd packet;
20178 * this can happen if we are not doing the rack
20179 * cheat and we skipped to a TLP and it
20180 * went off.
20182 rsm = rack->r_ctl.rc_tlpsend;
20183 /* We are doing a TLP, make sure the flag is present */
20184 rsm->r_flags |= RACK_TLP;
20185 rack->r_ctl.rc_tlpsend = NULL;
20186 sack_rxmit = 1;
20187 tlen = rsm->r_end - rsm->r_start;
20188 if (tlen > segsiz)
20189 tlen = segsiz;
20190 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
20191 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
20192 __func__, __LINE__,
20193 rsm->r_start, tp->snd_una, tp, rack, rsm));
20194 sb_offset = rsm->r_start - tp->snd_una;
20195 cwin = min(tp->snd_wnd, tlen);
20196 len = cwin;
20198 if (rack->r_must_retran &&
20199 (doing_tlp == 0) &&
20200 (SEQ_GT(tp->snd_max, tp->snd_una)) &&
20201 (rsm == NULL)) {
20203 * There are two different ways that we
20204 * can get into this block:
20205 * a) This is a non-sack connection, we had a time-out
20206 * and thus r_must_retran was set and everything
20207 * left outstanding has been marked for retransmit.
20208 * b) The MTU of the path shrank, so that everything
20209 * was marked to be retransmitted with the smaller
20210 * mtu and r_must_retran was set.
20212 * This means that we expect the sendmap (outstanding)
20213 * to all be marked must. We can use the tmap to
20214 * look at them.
20217 int sendwin, flight;
20219 sendwin = min(tp->snd_wnd, tp->snd_cwnd);
20220 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
20221 if (flight >= sendwin) {
20223 * We can't send yet.
20225 so = inp->inp_socket;
20226 sb = &so->so_snd;
20227 goto just_return_nolock;
20230 * This is the case a/b mentioned above. All
20231 * outstanding/not-acked should be marked.
20232 * We can use the tmap to find them.
20234 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
20235 if (rsm == NULL) {
20236 /* TSNH */
20237 rack->r_must_retran = 0;
20238 rack->r_ctl.rc_out_at_rto = 0;
20239 so = inp->inp_socket;
20240 sb = &so->so_snd;
20241 goto just_return_nolock;
20243 if ((rsm->r_flags & RACK_MUST_RXT) == 0) {
20245 * The first one does not have the flag; did we collapse
20246 * further up in our list?
20248 rack->r_must_retran = 0;
20249 rack->r_ctl.rc_out_at_rto = 0;
20250 rsm = NULL;
20251 sack_rxmit = 0;
20252 } else {
20253 sack_rxmit = 1;
20254 len = rsm->r_end - rsm->r_start;
20255 sb_offset = rsm->r_start - tp->snd_una;
20256 sendalot = 0;
20257 if ((rack->full_size_rxt == 0) &&
20258 (rack->shape_rxt_to_pacing_min == 0) &&
20259 (len >= segsiz))
20260 len = segsiz;
20261 else if (rack->shape_rxt_to_pacing_min &&
20262 rack->gp_ready) {
20263 /* We use pacing min as shaping len req */
20264 uint32_t maxlen;
20266 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz);
20267 if (len > maxlen)
20268 len = maxlen;
20271 * Delay removing the flag RACK_MUST_RXT so
20272 * that the fastpath for retransmit will
20273 * work with this rsm.
20278 * Enforce a connection sendmap count limit if set
20279 * as long as we are not retransmitting.
20281 if ((rsm == NULL) &&
20282 (V_tcp_map_entries_limit > 0) &&
20283 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
20284 counter_u64_add(rack_to_alloc_limited, 1);
20285 if (!rack->alloc_limit_reported) {
20286 rack->alloc_limit_reported = 1;
20287 counter_u64_add(rack_alloc_limited_conns, 1);
20289 so = inp->inp_socket;
20290 sb = &so->so_snd;
20291 goto just_return_nolock;
20293 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
20294 /* we are retransmitting the fin */
20295 len--;
20296 if (len) {
20298 * When retransmitting data do *not* include the
20299 * FIN. This could happen from a TLP probe.
20301 flags &= ~TH_FIN;
20304 if (rsm && rack->r_fsb_inited &&
20305 rack_use_rsm_rfo &&
20306 ((rsm->r_flags & RACK_HAS_FIN) == 0)) {
20307 int ret;
20309 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp);
20310 if (ret == 0)
20311 return (0);
20313 so = inp->inp_socket;
20314 sb = &so->so_snd;
20315 if (do_a_prefetch == 0) {
20316 kern_prefetch(sb, &do_a_prefetch);
20317 do_a_prefetch = 1;
20319 #ifdef NETFLIX_SHARED_CWND
20320 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) &&
20321 rack->rack_enable_scwnd) {
20322 /* We are doing cwnd sharing */
20323 if (rack->gp_ready &&
20324 (rack->rack_attempted_scwnd == 0) &&
20325 (rack->r_ctl.rc_scw == NULL) &&
20326 tp->t_lib) {
20327 /* The pcbid is in, lets make an attempt */
20328 counter_u64_add(rack_try_scwnd, 1);
20329 rack->rack_attempted_scwnd = 1;
20330 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp,
20331 &rack->r_ctl.rc_scw_index,
20332 segsiz);
20334 if (rack->r_ctl.rc_scw &&
20335 (rack->rack_scwnd_is_idle == 1) &&
20336 sbavail(&so->so_snd)) {
20337 /* we are no longer out of data */
20338 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
20339 rack->rack_scwnd_is_idle = 0;
20341 if (rack->r_ctl.rc_scw) {
20342 /* First lets update and get the cwnd */
20343 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw,
20344 rack->r_ctl.rc_scw_index,
20345 tp->snd_cwnd, tp->snd_wnd, segsiz);
20348 #endif
20350 * Get standard flags, and add SYN or FIN if requested by 'hidden'
20351 * state flags.
20353 if (tp->t_flags & TF_NEEDFIN)
20354 flags |= TH_FIN;
20355 if (tp->t_flags & TF_NEEDSYN)
20356 flags |= TH_SYN;
20357 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) {
20358 void *end_rsm;
20359 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
20360 if (end_rsm)
20361 kern_prefetch(end_rsm, &prefetch_rsm);
20362 prefetch_rsm = 1;
20364 SOCK_SENDBUF_LOCK(so);
20365 if ((sack_rxmit == 0) &&
20366 (TCPS_HAVEESTABLISHED(tp->t_state) ||
20367 (tp->t_flags & TF_FASTOPEN))) {
20369 * We are not retransmitting (sack_rxmit is 0) so we
20370 * are sending new data. This is always based on snd_max.
20371 * Now in theory snd_max may be equal to snd_una, if so
20372 * then nothing is outstanding and the offset would be 0.
20374 uint32_t avail;
20376 avail = sbavail(sb);
20377 if (SEQ_GT(tp->snd_max, tp->snd_una) && avail)
20378 sb_offset = tp->snd_max - tp->snd_una;
20379 else
20380 sb_offset = 0;
20381 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) {
20382 if (rack->r_ctl.rc_tlp_new_data) {
20383 /* TLP is forcing out new data */
20384 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
20385 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
20387 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) {
20388 if (tp->snd_wnd > sb_offset)
20389 len = tp->snd_wnd - sb_offset;
20390 else
20391 len = 0;
20392 } else {
20393 len = rack->r_ctl.rc_tlp_new_data;
20395 rack->r_ctl.rc_tlp_new_data = 0;
20396 } else {
20397 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset);
20399 if ((rack->r_ctl.crte == NULL) &&
20400 IN_FASTRECOVERY(tp->t_flags) &&
20401 (rack->full_size_rxt == 0) &&
20402 (rack->shape_rxt_to_pacing_min == 0) &&
20403 (len > segsiz)) {
20405 * For prr=off, we need to send only 1 MSS
20406 * at a time. We do this because another sack could
20407 * be arriving that causes us to send retransmits and
20408 * we don't want to be on a long pace due to a larger send
20409 * that keeps us from sending out the retransmit.
20411 len = segsiz;
20412 } else if (rack->shape_rxt_to_pacing_min &&
20413 rack->gp_ready) {
20414 /* We use pacing min as shaping len req */
20415 uint32_t maxlen;
20417 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz);
20418 if (len > maxlen)
20419 len = maxlen;
20420 }/* The else case: full_size_rxt is on, so send it all */
20421 } else {
20422 uint32_t outstanding;
20424 * We are inside of a Fast recovery episode, this
20425 * is caused by a SACK or 3 dup acks. At this point
20426 * we have sent all the retransmissions and we rely
20427 * on PRR to dictate what we will send in the form of
20428 * new data.
20431 outstanding = tp->snd_max - tp->snd_una;
20432 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) {
20433 if (tp->snd_wnd > outstanding) {
20434 len = tp->snd_wnd - outstanding;
20435 /* Check to see if we have the data */
20436 if ((sb_offset + len) > avail) {
20437 /* It does not all fit */
20438 if (avail > sb_offset)
20439 len = avail - sb_offset;
20440 else
20441 len = 0;
20443 } else {
20444 len = 0;
20446 } else if (avail > sb_offset) {
20447 len = avail - sb_offset;
20448 } else {
20449 len = 0;
20451 if (len > 0) {
20452 if (len > rack->r_ctl.rc_prr_sndcnt) {
20453 len = rack->r_ctl.rc_prr_sndcnt;
20455 if (len > 0) {
20456 sub_from_prr = 1;
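/*
 * Note: sub_from_prr makes the post-send path charge what was
 * actually sent against rc_prr_sndcnt once the transmit succeeds.
 */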
20459 if (len > segsiz) {
20461 * We should never send more than a MSS when
20462 * retransmitting or sending new data in prr
20463 * mode unless the override flag is on. Most
20464 * likely the PRR algorithm is not going to
20465 * let us send a lot as well :-)
20467 if (rack->r_ctl.rc_prr_sendalot == 0) {
20468 len = segsiz;
20470 } else if (len < segsiz) {
20472 * Do we send any? The idea here is that if the
20473 * send empties the socket buffer we want to
20474 * do it. However, if not, then let's just wait
20475 * for our prr_sndcnt to get bigger.
20477 long leftinsb;
20479 leftinsb = sbavail(sb) - sb_offset;
20480 if (leftinsb > len) {
20481 /* This send does not empty the sb */
20482 len = 0;
20486 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) {
20488 * If the connection is not established
20489 * and we are not doing FAST OPEN,
20490 * no data please.
20492 if ((sack_rxmit == 0) &&
20493 !(tp->t_flags & TF_FASTOPEN)) {
20494 len = 0;
20495 sb_offset = 0;
20498 if (prefetch_so_done == 0) {
20499 kern_prefetch(so, &prefetch_so_done);
20500 prefetch_so_done = 1;
20502 orig_len = len;
20504 * Lop off SYN bit if it has already been sent. However, if this is
20505 * SYN-SENT state and if segment contains data and if we don't know
20506 * that foreign host supports TAO, suppress sending segment.
20508 if ((flags & TH_SYN) &&
20509 SEQ_GT(tp->snd_max, tp->snd_una) &&
20510 ((sack_rxmit == 0) &&
20511 (tp->t_rxtshift == 0))) {
20513 * When sending additional segments following a TFO SYN|ACK,
20514 * do not include the SYN bit.
20516 if ((tp->t_flags & TF_FASTOPEN) &&
20517 (tp->t_state == TCPS_SYN_RECEIVED))
20518 flags &= ~TH_SYN;
20521 * Be careful not to send data and/or FIN on SYN segments. This
20522 * measure is needed to prevent interoperability problems with not
20523 * fully conformant TCP implementations.
20525 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
20526 len = 0;
20527 flags &= ~TH_FIN;
20530 * On TFO sockets, ensure no data is sent in the following cases:
20532 * - When retransmitting SYN|ACK on a passively-created socket
20534 * - When retransmitting SYN on an actively created socket
20536 * - When sending a zero-length cookie (cookie request) on an
20537 * actively created socket
20539 * - When the socket is in the CLOSED state (RST is being sent)
20541 if ((tp->t_flags & TF_FASTOPEN) &&
20542 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
20543 ((tp->t_state == TCPS_SYN_SENT) &&
20544 (tp->t_tfo_client_cookie_len == 0)) ||
20545 (flags & TH_RST))) {
20546 sack_rxmit = 0;
20547 len = 0;
20549 /* Without fast-open there should never be data sent on a SYN */
20550 if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) {
20551 len = 0;
20553 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) {
20554 /* We only send 1 MSS if we have a DSACK block */
20555 add_flag |= RACK_SENT_W_DSACK;
20556 len = segsiz;
20558 if (len <= 0) {
20560 * We have nothing to send, or the window shrank, or
20561 * is closed, do we need to go into persists?
20563 len = 0;
20564 if ((tp->snd_wnd == 0) &&
20565 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
20566 (tp->snd_una == tp->snd_max) &&
20567 (sb_offset < (int)sbavail(sb))) {
20568 rack_enter_persist(tp, rack, cts, tp->snd_una);
20570 } else if ((rsm == NULL) &&
20571 (doing_tlp == 0) &&
20572 (len < pace_max_seg)) {
20574 * We are not sending a maximum sized segment for
20575 * some reason. Should we not send anything (think
20576 * sws or persists)?
20578 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
20579 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
20580 (len < minseg) &&
20581 (len < (int)(sbavail(sb) - sb_offset))) {
20583 * Here the rwnd is less than
20584 * the minimum pacing size, this is not a retransmit,
20585 * we are established and
20586 * the send is not the last in the socket buffer
20587 * we send nothing, and we may enter persists
20588 * if nothing is outstanding.
20590 len = 0;
20591 if (tp->snd_max == tp->snd_una) {
20593 * Nothing out we can
20594 * go into persists.
20596 rack_enter_persist(tp, rack, cts, tp->snd_una);
20598 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) &&
20599 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
20600 (len < (int)(sbavail(sb) - sb_offset)) &&
20601 (len < minseg)) {
20603 * Here we are not retransmitting, and
20604 * the cwnd is not so small that we could
20605 * not send at least a min size (rxt timer
20606 * not having gone off), We have 2 segments or
20607 * not having gone off), we have 2 segments or
20608 * more already in flight, it's not the tail end
20609 * us from sending out a minimum pacing segment size.
20610 * Lets not send anything.
20612 len = 0;
20613 } else if (((tp->snd_wnd - ctf_outstanding(tp)) <
20614 min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
20615 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
20616 (len < (int)(sbavail(sb) - sb_offset)) &&
20617 (TCPS_HAVEESTABLISHED(tp->t_state))) {
20619 * Here we have a send window but we have
20620 * filled it up and we can't send another pacing segment.
20621 * We also have in flight more than 2 segments
20622 * and we are not completing the sb i.e. we allow
20623 * the last bytes of the sb to go out even if
20624 * it's not a full pacing segment.
20626 len = 0;
20627 } else if ((rack->r_ctl.crte != NULL) &&
20628 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) &&
20629 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) &&
20630 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) &&
20631 (len < (int)(sbavail(sb) - sb_offset))) {
20633 * Here we are doing hardware pacing, this is not a TLP,
20634 * we are not sending a pace max segment size, there is rwnd
20635 * room to send at least N pace_max_seg, the cwnd is greater
20636 * than or equal to a full pacing segment plus 4 mss and we have 2 or
20637 * more segments in flight and it's not the tail of the socket buffer.
20639 * We don't want to send; instead we need to get more ack's in to
20640 * allow us to send a full pacing segment. Normally, if we are pacing
20641 * about the right speed, we should have finished our pacing
20642 * send as most of the acks have come back if we are at the
20643 * right rate. This is a bit fuzzy since return path delay
20644 * can delay the acks, which is why we want to make sure we
20645 * have cwnd space to have a bit more than a max pace segments in flight.
20647 * If we have not gotten our acks back we are pacing at too high a
20648 * rate; delaying will not hurt and will bring our GP estimate down by
20649 * injecting the delay. If we don't do this we will send
20650 * 2 MSS out in response to the acks being clocked in which
20651 * defeats the point of hw-pacing (i.e. to help us get
20652 * larger TSO's out).
20654 len = 0;
20658 /* len will be >= 0 after this point. */
20659 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
20660 rack_sndbuf_autoscale(rack);
20662 * Decide if we can use TCP Segmentation Offloading (if supported by
20663 * hardware).
20665 * TSO may only be used if we are in a pure bulk sending state. The
20666 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
20667 * options prevent using TSO. With TSO the TCP header is the same
20668 * (except for the sequence number) for all generated packets. This
20669 * makes it impossible to transmit any options which vary per
20670 * generated segment or packet.
20672 * IPv4 handling has a clear separation of ip options and ip header
20673 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
20674 * the right thing below to provide length of just ip options and thus
20675 * checking for ipoptlen is enough to decide if ip options are present.
20677 ipoptlen = 0;
20678 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
20680 * Pre-calculate here as we save another lookup into the darknesses
20681 * of IPsec that way and can actually decide if TSO is ok.
20683 #ifdef INET6
20684 if (isipv6 && IPSEC_ENABLED(ipv6))
20685 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp);
20686 #ifdef INET
20687 else
20688 #endif
20689 #endif /* INET6 */
20690 #ifdef INET
20691 if (IPSEC_ENABLED(ipv4))
20692 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp);
20693 #endif /* INET */
20694 #endif
20696 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
20697 ipoptlen += ipsec_optlen;
20698 #endif
20699 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz &&
20700 (tp->t_port == 0) &&
20701 ((tp->t_flags & TF_SIGNATURE) == 0) &&
20702 sack_rxmit == 0 &&
20703 ipoptlen == 0)
20704 tso = 1;
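/*
 * Note: TSO is only attempted for plain bulk sends: more than one
 * segment of data, no UDP tunneling, no TCP-MD5 signature, not a
 * SACK retransmission and no IP/IPsec options.
 */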
20706 uint32_t outstanding __unused;
20708 outstanding = tp->snd_max - tp->snd_una;
20709 if (tp->t_flags & TF_SENTFIN) {
20711 * If we sent a fin, snd_max is 1 higher than
20712 * snd_una
20714 outstanding--;
20716 if (sack_rxmit) {
20717 if ((rsm->r_flags & RACK_HAS_FIN) == 0)
20718 flags &= ~TH_FIN;
20721 recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
20722 (long)TCP_MAXWIN << tp->rcv_scale);
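/*
 * Note: recwin is the receive window we can offer, i.e. the free
 * space in the receive buffer clamped to the largest value that
 * fits in the window field at our window scale.
 */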
20725 * Sender silly window avoidance. We transmit under the following
20726 * conditions when len is non-zero:
20728 * - We have a full segment (or more with TSO) - This is the last
20729 * buffer in a write()/send() and we are either idle or running
20730 * NODELAY - we've timed out (e.g. persist timer) - we have more
20731 * than 1/2 the maximum send window's worth of data (receiver may be
20732 * limiting the window size) - we need to retransmit
20734 if (len) {
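/*
 * Note: each branch below that decides to transmit tags 'pass'
 * with a distinct value; it is folded into the black-box log
 * record (flex7) built just before the send.
 */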
20735 if (len >= segsiz) {
20736 goto send;
20739 * NOTE! on localhost connections an 'ack' from the remote
20740 * end may occur synchronously with the output and cause us
20741 * to flush a buffer queued with moretocome. XXX
20744 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
20745 (idle || (tp->t_flags & TF_NODELAY)) &&
20746 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
20747 (tp->t_flags & TF_NOPUSH) == 0) {
20748 pass = 2;
20749 goto send;
20751 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */
20752 pass = 22;
20753 goto send;
20755 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
20756 pass = 4;
20757 goto send;
20759 if (sack_rxmit) {
20760 pass = 6;
20761 goto send;
20763 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) &&
20764 (ctf_outstanding(tp) < (segsiz * 2))) {
20766 * We have less than two MSS outstanding (delayed ack)
20767 * and our rwnd will not let us send a full sized
20768 * MSS. Lets go ahead and let this small segment
20769 * out because we want to try to have at least two
20770 * packets inflight to not be caught by delayed ack.
20772 pass = 12;
20773 goto send;
20777 * Sending of standalone window updates.
20779 * Window updates are important when we close our window due to a
20780 * full socket buffer and are opening it again after the application
20781 * reads data from it. Once the window has opened again and the
20782 * remote end starts to send again the ACK clock takes over and
20783 * provides the most current window information.
20785 * We must avoid the silly window syndrome whereby every read from
20786 * the receive buffer, no matter how small, causes a window update
20787 * to be sent. We also should avoid sending a flurry of window
20788 * updates when the socket buffer had queued a lot of data and the
20789 * application is doing small reads.
20791 * Prevent a flurry of pointless window updates by only sending an
20792 * update when we can increase the advertised window by more than
20793 * 1/4th of the socket buffer capacity. When the buffer is getting
20794 * full or is very small be more aggressive and send an update
20795 * whenever we can increase by two mss sized segments. In all other
20796 * situations the ACK's to new incoming data will carry further
20797 * window increases.
20799 * Don't send an independent window update if a delayed ACK is
20800 * pending (it will get piggy-backed on it) or the remote side
20801 * already has done a half-close and won't send more data. Skip
20802 * this if the connection is in T/TCP half-open state.
20804 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
20805 !(tp->t_flags & TF_DELACK) &&
20806 !TCPS_HAVERCVDFIN(tp->t_state)) {
20808 * "adv" is the amount we could increase the window, taking
20809 * into account that we are limited by TCP_MAXWIN <<
20810 * tp->rcv_scale.
20812 int32_t adv;
20813 int oldwin;
20815 adv = recwin;
20816 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
20817 oldwin = (tp->rcv_adv - tp->rcv_nxt);
20818 if (adv > oldwin)
20819 adv -= oldwin;
20820 else {
20821 /* We can't increase the window */
20822 adv = 0;
20824 } else
20825 oldwin = 0;
20828 * If the new window size ends up being the same as or less
20829 * than the old size when it is scaled, then don't force
20830 * a window update.
20832 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
20833 goto dontupdate;
20835 if (adv >= (int32_t)(2 * segsiz) &&
20836 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
20837 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
20838 so->so_rcv.sb_hiwat <= 8 * segsiz)) {
20839 pass = 7;
20840 goto send;
20842 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) {
20843 pass = 23;
20844 goto send;
20847 dontupdate:
20850 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
20851 * is also a catch-all for the retransmit timer timeout case.
20853 if (tp->t_flags & TF_ACKNOW) {
20854 pass = 8;
20855 goto send;
20857 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
20858 pass = 9;
20859 goto send;
20862 * If our state indicates that FIN should be sent and we have not
20863 * yet done so, then we need to send.
20865 if ((flags & TH_FIN) &&
20866 (tp->snd_max == tp->snd_una)) {
20867 pass = 11;
20868 goto send;
20871 * No reason to send a segment, just return.
20873 just_return:
20874 SOCK_SENDBUF_UNLOCK(so);
20875 just_return_nolock:
20877 int app_limited = CTF_JR_SENT_DATA;
20879 if ((tp->t_flags & TF_FASTOPEN) == 0 &&
20880 (flags & TH_FIN) &&
20881 (len == 0) &&
20882 (sbused(sb) == (tp->snd_max - tp->snd_una)) &&
20883 ((tp->snd_max - tp->snd_una) <= segsiz)) {
20885 * Ok less than or right at a MSS is
20886 * outstanding. The original FreeBSD stack would
20887 * have sent a FIN, which can speed things up for
20888 * a transactional application doing a MSG_WAITALL.
20889 * To speed things up since we do *not* send a FIN
20890 * if data is outstanding, we send a "challenge ack".
20891 * The idea behind that is instead of having to have
20892 * the peer wait for the delayed-ack timer to run off
20893 * we send an ack that makes the peer send us an ack.
20895 rack_send_ack_challange(rack);
20897 if (tot_len_this_send > 0) {
20898 rack->r_ctl.fsb.recwin = recwin;
20899 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz, __LINE__);
20900 if ((error == 0) &&
20901 rack_use_rfo &&
20902 ((flags & (TH_SYN|TH_FIN)) == 0) &&
20903 (ipoptlen == 0) &&
20904 rack->r_fsb_inited &&
20905 TCPS_HAVEESTABLISHED(tp->t_state) &&
20906 ((IN_RECOVERY(tp->t_flags)) == 0) &&
20907 (rack->r_must_retran == 0) &&
20908 ((tp->t_flags & TF_NEEDFIN) == 0) &&
20909 (len > 0) && (orig_len > 0) &&
20910 (orig_len > len) &&
20911 ((orig_len - len) >= segsiz) &&
20912 ((optlen == 0) ||
20913 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
20914 /* We can send at least one more MSS using our fsb */
20915 rack_setup_fast_output(tp, rack, sb, len, orig_len,
20916 segsiz, pace_max_seg, hw_tls, flags);
20917 } else
20918 rack->r_fast_output = 0;
20919 rack_log_fsb(rack, tp, so, flags,
20920 ipoptlen, orig_len, len, 0,
20921 1, optlen, __LINE__, 1);
20922 /* Assure when we leave that snd_nxt will point to top */
20923 if (SEQ_GT(tp->snd_max, tp->snd_nxt))
20924 tp->snd_nxt = tp->snd_max;
20925 } else {
20926 int end_window = 0;
20927 uint32_t seq = tp->gput_ack;
20929 rsm = tqhash_max(rack->r_ctl.tqh);
20930 if (rsm) {
20932 * Mark the last rsm sent as just-returned (hinting
20933 * that delayed ack may play a role in any rtt measurement).
20935 rsm->r_just_ret = 1;
20937 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
20938 rack->r_ctl.rc_agg_delayed = 0;
20939 rack->r_early = 0;
20940 rack->r_late = 0;
20941 rack->r_ctl.rc_agg_early = 0;
20942 if ((ctf_outstanding(tp) +
20943 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)),
20944 minseg)) >= tp->snd_wnd) {
20945 /* We are limited by the rwnd */
20946 app_limited = CTF_JR_RWND_LIMITED;
20947 if (IN_FASTRECOVERY(tp->t_flags))
20948 rack->r_ctl.rc_prr_sndcnt = 0;
20949 } else if (ctf_outstanding(tp) >= sbavail(sb)) {
20950 /* We are limited by what's available -- app limited */
20951 app_limited = CTF_JR_APP_LIMITED;
20952 if (IN_FASTRECOVERY(tp->t_flags))
20953 rack->r_ctl.rc_prr_sndcnt = 0;
20954 } else if ((idle == 0) &&
20955 ((tp->t_flags & TF_NODELAY) == 0) &&
20956 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
20957 (len < segsiz)) {
20959 * TF_NODELAY is not on and the
20960 * user is sending less than 1 MSS. This
20961 * brings out SWS avoidance so we
20962 * don't send. Another app-limited case.
20964 app_limited = CTF_JR_APP_LIMITED;
20965 } else if (tp->t_flags & TF_NOPUSH) {
20967 * The user has requested no push of
20968 * the last segment and we are
20969 * at the last segment. Another app
20970 * limited case.
20972 app_limited = CTF_JR_APP_LIMITED;
20973 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) {
20974 /* Its the cwnd */
20975 app_limited = CTF_JR_CWND_LIMITED;
20976 } else if (IN_FASTRECOVERY(tp->t_flags) &&
20977 (rack->rack_no_prr == 0) &&
20978 (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
20979 app_limited = CTF_JR_PRR;
20980 } else {
20981 /* Now, why are we not sending here? */
20982 #ifdef NOW
20983 #ifdef INVARIANTS
20984 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
20985 #endif
20986 #endif
20987 app_limited = CTF_JR_ASSESSING;
20990 * App limited in some fashion, for our pacing GP
20991 * measurements we don't want any gap (even cwnd).
20992 * Close down the measurement window.
20994 if (rack_cwnd_block_ends_measure &&
20995 ((app_limited == CTF_JR_CWND_LIMITED) ||
20996 (app_limited == CTF_JR_PRR))) {
20998 * The reason we are not sending is
20999 * the cwnd (or prr). We have been configured
21000 * to end the measurement window in
21001 * this case.
21003 end_window = 1;
21004 } else if (rack_rwnd_block_ends_measure &&
21005 (app_limited == CTF_JR_RWND_LIMITED)) {
21007 * We are rwnd limited and have been
21008 * configured to end the measurement
21009 * window in this case.
21011 end_window = 1;
21012 } else if (app_limited == CTF_JR_APP_LIMITED) {
21014 * A true application limited period, we have
21015 * run out of data.
21017 end_window = 1;
21018 } else if (app_limited == CTF_JR_ASSESSING) {
21020 * In the assessing case we hit the end of
21021 * the if/else and had no known reason.
21022 * This will panic us under invariants.
21024 * If we get this out in logs we need to
21025 * investigate which reason we missed.
21027 end_window = 1;
21029 if (end_window) {
21030 uint8_t log = 0;
21032 /* Adjust the Gput measurement */
21033 if ((tp->t_flags & TF_GPUTINPROG) &&
21034 SEQ_GT(tp->gput_ack, tp->snd_max)) {
21035 tp->gput_ack = tp->snd_max;
21036 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
21038 * There is not enough to measure.
21040 tp->t_flags &= ~TF_GPUTINPROG;
21041 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
21042 rack->r_ctl.rc_gp_srtt /*flex1*/,
21043 tp->gput_seq,
21044 0, 0, 18, __LINE__, NULL, 0);
21045 } else
21046 log = 1;
21048 /* Mark the last packet as app limited */
21049 rsm = tqhash_max(rack->r_ctl.tqh);
21050 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
21051 if (rack->r_ctl.rc_app_limited_cnt == 0)
21052 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
21053 else {
21055 * Go out to the end app limited and mark
21056 * this new one as next and move the end_appl up
21057 * to this guy.
21059 if (rack->r_ctl.rc_end_appl)
21060 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
21061 rack->r_ctl.rc_end_appl = rsm;
21063 rsm->r_flags |= RACK_APP_LIMITED;
21064 rack->r_ctl.rc_app_limited_cnt++;
21066 if (log)
21067 rack_log_pacing_delay_calc(rack,
21068 rack->r_ctl.rc_app_limited_cnt, seq,
21069 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0);
21072 /* Check if we need to go into persists or not */
21073 if ((tp->snd_max == tp->snd_una) &&
21074 TCPS_HAVEESTABLISHED(tp->t_state) &&
21075 sbavail(sb) &&
21076 (sbavail(sb) > tp->snd_wnd) &&
21077 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) {
21078 /* Yes lets make sure to move to persist before timer-start */
21079 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una);
21081 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack);
21082 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use);
21084 #ifdef NETFLIX_SHARED_CWND
21085 if ((sbavail(sb) == 0) &&
21086 rack->r_ctl.rc_scw) {
21087 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
21088 rack->rack_scwnd_is_idle = 1;
21090 #endif
21091 #ifdef TCP_ACCOUNTING
21092 if (tot_len_this_send > 0) {
21093 crtsc = get_cyclecount();
21094 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
21095 tp->tcp_cnt_counters[SND_OUT_DATA]++;
21097 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
21098 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
21100 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
21101 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz);
21103 } else {
21104 crtsc = get_cyclecount();
21105 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
21106 tp->tcp_cnt_counters[SND_LIMITED]++;
21108 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
21109 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val);
21112 sched_unpin();
21113 #endif
21114 return (0);
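/*
 * Note: send: is reached only via the goto's above, once we have
 * decided that a segment (new data, a retransmission, a window
 * update or a bare ACK/control segment) must go out now.
 */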
21116 send:
21117 if ((rack->r_ctl.crte != NULL) &&
21118 (rsm == NULL) &&
21119 ((rack->rc_hw_nobuf == 1) ||
21120 (rack_hw_check_queue && (check_done == 0)))) {
21122 * We only want to do this once with the hw_check_queue,
21123 * for the enobuf case we would only do it once; if
21124 * we come around again, the flag will be clear.
21126 check_done = 1;
21127 slot = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz);
21128 if (slot) {
21129 rack->r_ctl.rc_agg_delayed = 0;
21130 rack->r_ctl.rc_agg_early = 0;
21131 rack->r_early = 0;
21132 rack->r_late = 0;
21133 SOCK_SENDBUF_UNLOCK(so);
21134 goto skip_all_send;
21137 if (rsm || sack_rxmit)
21138 counter_u64_add(rack_nfto_resend, 1);
21139 else
21140 counter_u64_add(rack_non_fto_send, 1);
21141 if ((flags & TH_FIN) &&
21142 sbavail(sb)) {
21144 * We do not transmit a FIN
21145 * with data outstanding. We
21146 * need to make it so all data
21147 * is acked first.
21149 flags &= ~TH_FIN;
21150 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
21151 (sbused(sb) == (tp->snd_max - tp->snd_una)) &&
21152 ((tp->snd_max - tp->snd_una) <= segsiz)) {
21154 * Ok less than or right at a MSS is
21155 * outstanding. The original FreeBSD stack would
21156 * have sent a FIN, which can speed things up for
21157 * a transactional application doing a MSG_WAITALL.
21158 * To speed things up since we do *not* send a FIN
21159 * if data is outstanding, we send a "challenge ack".
21160 * The idea behind that is instead of having to have
21161 * the peer wait for the delayed-ack timer to run off
21162 * we send an ack that makes the peer send us an ack.
21164 rack_send_ack_challange(rack);
21167 /* Enforce stack imposed max seg size if we have one */
21168 if (pace_max_seg &&
21169 (len > pace_max_seg)) {
21170 mark = 1;
21171 len = pace_max_seg;
21173 if ((rsm == NULL) &&
21174 (rack->pcm_in_progress == 0) &&
21175 (rack->r_ctl.pcm_max_seg > 0) &&
21176 (len >= rack->r_ctl.pcm_max_seg)) {
21177 /* It is large enough for a measurement */
21178 add_flag |= RACK_IS_PCM;
21179 rack_log_pcm(rack, 5, len, rack->r_ctl.pcm_max_seg, add_flag);
21180 } else if (rack_verbose_logging) {
21181 rack_log_pcm(rack, 6, len, rack->r_ctl.pcm_max_seg, add_flag);
21184 SOCKBUF_LOCK_ASSERT(sb);
21185 if (len > 0) {
21186 if (len >= segsiz)
21187 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
21188 else
21189 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
21192 * Before ESTABLISHED, force sending of initial options unless TCP
21193 * set not to do any options. NOTE: we assume that the IP/TCP header
21194 * plus TCP options always fit in a single mbuf, leaving room for a
21195 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr)
21196 * + optlen <= MCLBYTES
21198 optlen = 0;
21199 #ifdef INET6
21200 if (isipv6)
21201 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
21202 else
21203 #endif
21204 hdrlen = sizeof(struct tcpiphdr);
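/*
 * Note: hdrlen starts at the fixed IP(6) + TCP header size and
 * grows below as TCP options and, for tunneled connections, a UDP
 * header are added.
 */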
21207 * Ok, what seq are we sending from? If we have
21208 * no rsm to use, then we look at various bits:
21209 * if we are putting out a SYN it will be ISS.
21210 * If we are retransmitting a FIN it will
21211 * be snd_max-1, else it's snd_max.
21213 if (rsm == NULL) {
21214 if (flags & TH_SYN)
21215 rack_seq = tp->iss;
21216 else if ((flags & TH_FIN) &&
21217 (tp->t_flags & TF_SENTFIN))
21218 rack_seq = tp->snd_max - 1;
21219 else
21220 rack_seq = tp->snd_max;
21221 } else {
21222 rack_seq = rsm->r_start;
21225 * Compute options for segment. We only have to care about SYN and
21226 * established connection segments. Options for SYN-ACK segments
21227 * are handled in TCP syncache.
21229 to.to_flags = 0;
21230 if ((tp->t_flags & TF_NOOPT) == 0) {
21231 /* Maximum segment size. */
21232 if (flags & TH_SYN) {
21233 to.to_mss = tcp_mssopt(&inp->inp_inc);
21234 if (tp->t_port)
21235 to.to_mss -= V_tcp_udp_tunneling_overhead;
21236 to.to_flags |= TOF_MSS;
21239 * On SYN or SYN|ACK transmits on TFO connections,
21240 * only include the TFO option if it is not a
21241 * retransmit, as the presence of the TFO option may
21242 * have caused the original SYN or SYN|ACK to have
21243 * been dropped by a middlebox.
21245 if ((tp->t_flags & TF_FASTOPEN) &&
21246 (tp->t_rxtshift == 0)) {
21247 if (tp->t_state == TCPS_SYN_RECEIVED) {
21248 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
21249 to.to_tfo_cookie =
21250 (u_int8_t *)&tp->t_tfo_cookie.server;
21251 to.to_flags |= TOF_FASTOPEN;
21252 wanted_cookie = 1;
21253 } else if (tp->t_state == TCPS_SYN_SENT) {
21254 to.to_tfo_len =
21255 tp->t_tfo_client_cookie_len;
21256 to.to_tfo_cookie =
21257 tp->t_tfo_cookie.client;
21258 to.to_flags |= TOF_FASTOPEN;
21259 wanted_cookie = 1;
21261 * If we wind up having more data to
21262 * send with the SYN than can fit in
21263 * one segment, don't send any more
21264 * until the SYN|ACK comes back from
21265 * the other end.
21267 sendalot = 0;
21271 /* Window scaling. */
21272 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
21273 to.to_wscale = tp->request_r_scale;
21274 to.to_flags |= TOF_SCALE;
21276 /* Timestamps. */
21277 if ((tp->t_flags & TF_RCVD_TSTMP) ||
21278 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
21279 uint32_t ts_to_use;
21281 if ((rack->r_rcvpath_rtt_up == 1) &&
21282 (ms_cts == rack->r_ctl.last_rcv_tstmp_for_rtt)) {
21284 * When we are doing a rcv_rtt probe all
21285 * other timestamps use the next msec. This
21286 * is safe since our previous ack is in the
21287 * air and we will just have a few more
21288 * on the next ms. This assures that only
21289 * the one ack has the ms_cts that was on
21290 * our ack-probe.
21292 ts_to_use = ms_cts + 1;
21293 } else {
21294 ts_to_use = ms_cts;
21296 to.to_tsval = ts_to_use + tp->ts_offset;
21297 to.to_tsecr = tp->ts_recent;
21298 to.to_flags |= TOF_TS;
21299 if ((len == 0) &&
21300 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
21301 ((ms_cts - rack->r_ctl.last_rcv_tstmp_for_rtt) > RCV_PATH_RTT_MS) &&
21302 (tp->snd_una == tp->snd_max) &&
21303 (flags & TH_ACK) &&
21304 (sbavail(sb) == 0) &&
21305 (rack->r_ctl.current_round != 0) &&
21306 ((flags & (TH_SYN|TH_FIN)) == 0) &&
21307 (rack->r_rcvpath_rtt_up == 0)) {
21308 rack->r_ctl.last_rcv_tstmp_for_rtt = ms_cts;
21309 rack->r_ctl.last_time_of_arm_rcv = cts;
21310 rack->r_rcvpath_rtt_up = 1;
21311 /* Subtract 1 from seq to force a response */
21312 rack_seq--;
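/*
 * Note: using a sequence one below what the peer expects is the
 * classic keepalive-style trick; the out-of-window segment forces
 * the peer to reply with an ACK, giving us a receive-path RTT
 * sample tied to the timestamp chosen above.
 */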
21315 /* Set receive buffer autosizing timestamp. */
21316 if (tp->rfbuf_ts == 0 &&
21317 (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
21318 tp->rfbuf_ts = ms_cts;
21320 /* Selective ACK's. */
21321 if (tp->t_flags & TF_SACK_PERMIT) {
21322 if (flags & TH_SYN)
21323 to.to_flags |= TOF_SACKPERM;
21324 else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
21325 tp->rcv_numsacks > 0) {
21326 to.to_flags |= TOF_SACK;
21327 to.to_nsacks = tp->rcv_numsacks;
21328 to.to_sacks = (u_char *)tp->sackblks;
21331 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
21332 /* TCP-MD5 (RFC2385). */
21333 if (tp->t_flags & TF_SIGNATURE)
21334 to.to_flags |= TOF_SIGNATURE;
21335 #endif
21337 /* Processing the options. */
21338 hdrlen += optlen = tcp_addoptions(&to, opt);
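/*
 * Note: tcp_addoptions() encodes the requested options into opt[]
 * and returns their length padded to a 32-bit boundary, which is
 * folded into the header length here.
 */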
21340 * If we wanted a TFO option to be added, but it was unable
21341 * to fit, ensure no data is sent.
21343 if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie &&
21344 !(to.to_flags & TOF_FASTOPEN))
21345 len = 0;
21347 if (tp->t_port) {
21348 if (V_tcp_udp_tunneling_port == 0) {
21349 /* The port was removed?? */
21350 SOCK_SENDBUF_UNLOCK(so);
21351 #ifdef TCP_ACCOUNTING
21352 crtsc = get_cyclecount();
21353 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
21354 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
21356 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
21357 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
21359 sched_unpin();
21360 #endif
21361 return (EHOSTUNREACH);
21363 hdrlen += sizeof(struct udphdr);
21365 #ifdef INET6
21366 if (isipv6)
21367 ipoptlen = ip6_optlen(inp);
21368 else
21369 #endif
21370 if (inp->inp_options)
21371 ipoptlen = inp->inp_options->m_len -
21372 offsetof(struct ipoption, ipopt_list);
21373 else
21374 ipoptlen = 0;
21375 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
21376 ipoptlen += ipsec_optlen;
21377 #endif
21380 * Adjust data length if insertion of options will bump the packet
21381 * length beyond the t_maxseg length. Clear the FIN bit because we
21382 * cut off the tail of the segment.
21384 if (len + optlen + ipoptlen > tp->t_maxseg) {
21385 if (tso) {
21386 uint32_t if_hw_tsomax;
21387 uint32_t moff;
21388 int32_t max_len;
21390 /* extract TSO information */
21391 if_hw_tsomax = tp->t_tsomax;
21392 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
21393 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
21394 KASSERT(ipoptlen == 0,
21395 ("%s: TSO can't do IP options", __func__));
21398 * Check if we should limit by maximum payload
21399 * length:
21401 if (if_hw_tsomax != 0) {
21402 /* compute maximum TSO length */
21403 max_len = (if_hw_tsomax - hdrlen -
21404 max_linkhdr);
21405 if (max_len <= 0) {
21406 len = 0;
21407 } else if (len > max_len) {
21408 sendalot = 1;
21409 len = max_len;
21410 mark = 2;
21414 * Prevent the last segment from being fractional
21415 * unless the send sockbuf can be emptied:
21417 max_len = (tp->t_maxseg - optlen);
21418 if ((sb_offset + len) < sbavail(sb)) {
21419 moff = len % (u_int)max_len;
21420 if (moff != 0) {
21421 mark = 3;
21422 len -= moff;
21426 * In case there are too many small fragments don't
21427 * use TSO:
21429 if (len <= max_len) {
21430 mark = 4;
21431 tso = 0;
21434 * Send the FIN in a separate segment after the bulk
21435 * sending is done. We don't trust the TSO
21436 * implementations to clear the FIN flag on all but
21437 * the last segment.
21439 if (tp->t_flags & TF_NEEDFIN) {
21440 sendalot = 4;
21442 } else {
21443 mark = 5;
21444 if (optlen + ipoptlen >= tp->t_maxseg) {
21446 * Since we don't have enough space to put
21447 * the IP header chain and the TCP header in
21448 * one packet as required by RFC 7112, don't
21449 * send it. Also ensure that at least one
21450 * byte of the payload can be put into the
21451 * TCP segment.
21453 SOCK_SENDBUF_UNLOCK(so);
21454 error = EMSGSIZE;
21455 sack_rxmit = 0;
21456 goto out;
21458 len = tp->t_maxseg - optlen - ipoptlen;
21459 sendalot = 5;
21461 } else {
21462 tso = 0;
21463 mark = 6;
21465 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
21466 ("%s: len > IP_MAXPACKET", __func__));
21467 #ifdef DIAGNOSTIC
21468 #ifdef INET6
21469 if (max_linkhdr + hdrlen > MCLBYTES)
21470 #else
21471 if (max_linkhdr + hdrlen > MHLEN)
21472 #endif
21473 panic("tcphdr too big");
21474 #endif
21477 * This KASSERT is here to catch edge cases at a well defined place.
21478 * Before, those had triggered (random) panic conditions further
21479 * down.
21481 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
21482 if ((len == 0) &&
21483 (flags & TH_FIN) &&
21484 (sbused(sb))) {
21486 * We have outstanding data, don't send a FIN by itself!
21488 * Check to see if we need to send a challenge ack.
21490 if ((sbused(sb) == (tp->snd_max - tp->snd_una)) &&
21491 ((tp->snd_max - tp->snd_una) <= segsiz)) {
21493 * Ok less than or right at a MSS is
21494 * outstanding. The original FreeBSD stack would
21495 * have sent a FIN, which can speed things up for
21496 * a transactional application doing a MSG_WAITALL.
21497 * To speed things up since we do *not* send a FIN
21498 * if data is outstanding, we send a "challenge ack".
21499 * The idea behind that is instead of having to have
21500 * the peer wait for the delayed-ack timer to run off
21501 * we send an ack that makes the peer send us an ack.
21503 rack_send_ack_challange(rack);
21505 goto just_return;
21508 * Grab a header mbuf, attaching a copy of data to be transmitted,
21509 * and initialize the header from the template for sends on this
21510 * connection.
21512 hw_tls = tp->t_nic_ktls_xmit != 0;
21513 if (len) {
21514 uint32_t max_val;
21515 uint32_t moff;
21517 if (pace_max_seg)
21518 max_val = pace_max_seg;
21519 else
21520 max_val = len;
21522 * We allow a limit on sending with hptsi.
21524 if (len > max_val) {
21525 mark = 7;
21526 len = max_val;
21528 #ifdef INET6
21529 if (MHLEN < hdrlen + max_linkhdr)
21530 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
21531 else
21532 #endif
21533 m = m_gethdr(M_NOWAIT, MT_DATA);
21535 if (m == NULL) {
21536 SOCK_SENDBUF_UNLOCK(so);
21537 error = ENOBUFS;
21538 sack_rxmit = 0;
21539 goto out;
21541 m->m_data += max_linkhdr;
21542 m->m_len = hdrlen;
21545 * Start the m_copy functions from the closest mbuf to the
21546 * sb_offset in the socket buffer chain.
21548 mb = sbsndptr_noadv(sb, sb_offset, &moff);
21549 s_mb = mb;
21550 s_moff = moff;
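/*
 * Note: payloads small enough to fit in the header mbuf (and not
 * using NIC kTLS) are copied inline below; anything larger gets an
 * mbuf chain built by tcp_m_copym() from the socket buffer.
 */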
21551 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
21552 m_copydata(mb, moff, (int)len,
21553 mtod(m, caddr_t)+hdrlen);
21555 * If we are not retransmitting advance the
21556 * sndptr to help remember the next place in
21557 * the sb.
21559 if (rsm == NULL)
21560 sbsndptr_adv(sb, mb, len);
21561 m->m_len += len;
21562 } else {
21563 struct sockbuf *msb;
21566 * If we are not retransmitting pass in msb so
21567 * the socket buffer can be advanced. Otherwise
21568 * set it to NULL if its a retransmission since
21569 * we don't want to change the sb remembered
21570 * location.
21572 if (rsm == NULL)
21573 msb = sb;
21574 else
21575 msb = NULL;
21576 m->m_next = tcp_m_copym(
21577 mb, moff, &len,
21578 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb,
21579 ((rsm == NULL) ? hw_tls : 0)
21580 #ifdef NETFLIX_COPY_ARGS
21581 , &s_mb, &s_moff
21582 #endif
21584 if (len <= (tp->t_maxseg - optlen)) {
21586 * Must have run out of mbufs for the copy;
21587 * shorten it to no longer need tso. Let's
21588 * not put on sendalot since we are low on
21589 * mbufs.
21591 tso = 0;
21593 if (m->m_next == NULL) {
21594 SOCK_SENDBUF_UNLOCK(so);
21595 (void)m_free(m);
21596 error = ENOBUFS;
21597 sack_rxmit = 0;
21598 goto out;
21601 if (sack_rxmit) {
21602 if (rsm && (rsm->r_flags & RACK_TLP)) {
21604 * TLP should not count in retran count, but
21605 * in its own bin
21607 counter_u64_add(rack_tlp_retran, 1);
21608 counter_u64_add(rack_tlp_retran_bytes, len);
21609 } else {
21610 tp->t_sndrexmitpack++;
21611 KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
21612 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
21614 #ifdef STATS
21615 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
21616 len);
21617 #endif
21618 } else {
21619 KMOD_TCPSTAT_INC(tcps_sndpack);
21620 KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
21621 #ifdef STATS
21622 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
21623 len);
21624 #endif
21627 * If we're sending everything we've got, set PUSH. (This
21628 * will keep happy those implementations which only give
21629 * data to the user when a buffer fills or a PUSH comes in.)
21631 if (sb_offset + len == sbused(sb) &&
21632 sbused(sb) &&
21633 !(flags & TH_SYN)) {
21634 flags |= TH_PUSH;
21635 add_flag |= RACK_HAD_PUSH;
21638 SOCK_SENDBUF_UNLOCK(so);
21639 } else {
21640 SOCK_SENDBUF_UNLOCK(so);
21641 if (tp->t_flags & TF_ACKNOW)
21642 KMOD_TCPSTAT_INC(tcps_sndacks);
21643 else if (flags & (TH_SYN | TH_FIN | TH_RST))
21644 KMOD_TCPSTAT_INC(tcps_sndctrl);
21645 else
21646 KMOD_TCPSTAT_INC(tcps_sndwinup);
21648 m = m_gethdr(M_NOWAIT, MT_DATA);
21649 if (m == NULL) {
21650 error = ENOBUFS;
21651 sack_rxmit = 0;
21652 goto out;
21654 #ifdef INET6
21655 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
21656 MHLEN >= hdrlen) {
21657 M_ALIGN(m, hdrlen);
21658 } else
21659 #endif
21660 m->m_data += max_linkhdr;
21661 m->m_len = hdrlen;
21663 SOCK_SENDBUF_UNLOCK_ASSERT(so);
21664 m->m_pkthdr.rcvif = (struct ifnet *)0;
21665 #ifdef MAC
21666 mac_inpcb_create_mbuf(inp, m);
21667 #endif
21668 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
21669 #ifdef INET6
21670 if (isipv6)
21671 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
21672 else
21673 #endif /* INET6 */
21674 #ifdef INET
21675 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
21676 #endif
21677 th = rack->r_ctl.fsb.th;
21678 udp = rack->r_ctl.fsb.udp;
21679 if (udp) {
21680 #ifdef INET6
21681 if (isipv6)
21682 ulen = hdrlen + len - sizeof(struct ip6_hdr);
21683 else
21684 #endif /* INET6 */
21685 ulen = hdrlen + len - sizeof(struct ip);
21686 udp->uh_ulen = htons(ulen);
21688 } else {
21689 #ifdef INET6
21690 if (isipv6) {
21691 ip6 = mtod(m, struct ip6_hdr *);
21692 if (tp->t_port) {
21693 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
21694 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
21695 udp->uh_dport = tp->t_port;
21696 ulen = hdrlen + len - sizeof(struct ip6_hdr);
21697 udp->uh_ulen = htons(ulen);
21698 th = (struct tcphdr *)(udp + 1);
21699 } else
21700 th = (struct tcphdr *)(ip6 + 1);
21701 tcpip_fillheaders(inp, tp->t_port, ip6, th);
21702 } else
21703 #endif /* INET6 */
21705 #ifdef INET
21706 ip = mtod(m, struct ip *);
21707 if (tp->t_port) {
21708 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
21709 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
21710 udp->uh_dport = tp->t_port;
21711 ulen = hdrlen + len - sizeof(struct ip);
21712 udp->uh_ulen = htons(ulen);
21713 th = (struct tcphdr *)(udp + 1);
21714 } else
21715 th = (struct tcphdr *)(ip + 1);
21716 tcpip_fillheaders(inp, tp->t_port, ip, th);
21717 #endif
21721 * If we are starting a connection, send ECN setup SYN packet. If we
21722 * are on a retransmit, we may resend those bits a number of times
21723 * as per RFC 3168.
21725 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) {
21726 flags |= tcp_ecn_output_syn_sent(tp);
21728 /* Also handle parallel SYN for ECN */
21729 if (TCPS_HAVERCVDSYN(tp->t_state) &&
21730 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
21731 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit);
21732 if ((tp->t_state == TCPS_SYN_RECEIVED) &&
21733 (tp->t_flags2 & TF2_ECN_SND_ECE))
21734 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
21735 #ifdef INET6
21736 if (isipv6) {
21737 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
21738 ip6->ip6_flow |= htonl(ect << 20);
21740 else
21741 #endif
21743 #ifdef INET
21744 ip->ip_tos &= ~IPTOS_ECN_MASK;
21745 ip->ip_tos |= ect;
21746 #endif
21749 th->th_seq = htonl(rack_seq);
21750 th->th_ack = htonl(tp->rcv_nxt);
21751 tcp_set_flags(th, flags);
21753 * Calculate receive window. Don't shrink window, but avoid silly
21754 * window syndrome.
21755 * If a RST segment is sent, advertise a window of zero.
21757 if (flags & TH_RST) {
21758 recwin = 0;
21759 } else {
21760 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
21761 recwin < (long)segsiz) {
21762 recwin = 0;
21764 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
21765 recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
21766 recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
21770 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or
21771 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is
21772 * handled in syncache.
21774 if (flags & TH_SYN)
21775 th->th_win = htons((u_short)
21776 (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
21777 else {
21778 /* Avoid shrinking window with window scaling. */
21779 recwin = roundup2(recwin, 1 << tp->rcv_scale);
21780 th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
21783 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
21784 * window. This may cause the remote transmitter to stall. This
21785 * flag tells soreceive() to disable delayed acknowledgements when
21786 * draining the buffer. This can occur if the receiver is
21787 * attempting to read more data than can be buffered prior to
21788 * transmitting on the connection.
21790 if (th->th_win == 0) {
21791 tp->t_sndzerowin++;
21792 tp->t_flags |= TF_RXWIN0SENT;
21793 } else
21794 tp->t_flags &= ~TF_RXWIN0SENT;
21795 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */
21796 /* Now are we using fsb?, if so copy the template data to the mbuf */
21797 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
21798 uint8_t *cpto;
21800 cpto = mtod(m, uint8_t *);
21801 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
21803 * We have just copied in:
21804 * IP/IP6
21805 * <optional udphdr>
21806 * tcphdr (no options)
21808 * We need to grab the correct pointers into the mbuf
21809 * for both the tcp header, and possibly the udp header (if tunneling).
21810 * We do this by using the offset in the copy buffer and adding it
21811 * to the mbuf base pointer (cpto).
21813 #ifdef INET6
21814 if (isipv6)
21815 ip6 = mtod(m, struct ip6_hdr *);
21816 else
21817 #endif /* INET6 */
21818 #ifdef INET
21819 ip = mtod(m, struct ip *);
21820 #endif
21821 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
21822 /* If we have a udp header lets set it into the mbuf as well */
21823 if (udp)
21824 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr));
21826 if (optlen) {
21827 bcopy(opt, th + 1, optlen);
21828 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
21831 * Put TCP length in extended header, and then checksum extended
21832 * header and data.
21834 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
21835 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
21836 if (to.to_flags & TOF_SIGNATURE) {
21838 * Calculate MD5 signature and put it into the place
21839 * determined before.
21840 * NOTE: since TCP options buffer doesn't point into
21841 * mbuf's data, calculate offset and use it.
21843 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
21844 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
21846 * Do not send segment if the calculation of MD5
21847 * digest has failed.
21849 goto out;
21852 #endif
21853 #ifdef INET6
21854 if (isipv6) {
21856 * ip6_plen need not be filled in now; it will be filled
21857 * in ip6_output().
21859 if (tp->t_port) {
21860 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
21861 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
21862 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
21863 th->th_sum = htons(0);
21864 UDPSTAT_INC(udps_opackets);
21865 } else {
21866 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
21867 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
21868 th->th_sum = in6_cksum_pseudo(ip6,
21869 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
21873 #endif
21874 #if defined(INET6) && defined(INET)
21875 else
21876 #endif
21877 #ifdef INET
21879 if (tp->t_port) {
21880 m->m_pkthdr.csum_flags = CSUM_UDP;
21881 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
21882 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
21883 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
21884 th->th_sum = htons(0);
21885 UDPSTAT_INC(udps_opackets);
21886 } else {
21887 m->m_pkthdr.csum_flags = CSUM_TCP;
21888 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
21889 th->th_sum = in_pseudo(ip->ip_src.s_addr,
21890 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
21891 IPPROTO_TCP + len + optlen));
21893 /* IP version must be set here for ipv4/ipv6 checking later */
21894 KASSERT(ip->ip_v == IPVERSION,
21895 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
21897 #endif
21899 * Enable TSO and specify the size of the segments. The TCP pseudo
21900 * header checksum is always provided. XXX: Fixme: This is currently
21901 * not the case for IPv6.
21903 if (tso) {
21905 * Here we must use t_maxseg and the optlen since
21906 * the optlen may include SACK's (or DSACK).
21908 KASSERT(len > tp->t_maxseg - optlen,
21909 ("%s: len <= tso_segsz", __func__));
21910 m->m_pkthdr.csum_flags |= CSUM_TSO;
21911 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
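/*
 * Note: with CSUM_TSO set the driver/NIC splits the payload into
 * tso_segsz sized segments, each carrying a copy of this header.
 */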
21913 KASSERT(len + hdrlen == m_length(m, NULL),
21914 ("%s: mbuf chain different than expected: %d + %u != %u",
21915 __func__, len, hdrlen, m_length(m, NULL)));
21917 #ifdef TCP_HHOOK
21918 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
21919 hhook_run_tcp_est_out(tp, th, &to, len, tso);
21920 #endif
21921 if ((rack->r_ctl.crte != NULL) &&
21922 (rack->rc_hw_nobuf == 0) &&
21923 tcp_bblogging_on(tp)) {
21924 rack_log_queue_level(tp, rack, len, &tv, cts);
21926 /* We're getting ready to send; log now. */
21927 if (tcp_bblogging_on(rack->rc_tp)) {
21928 union tcp_log_stackspecific log;
21930 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
21931 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
21932 if (rack->rack_no_prr)
21933 log.u_bbr.flex1 = 0;
21934 else
21935 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
21936 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
21937 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
21938 log.u_bbr.flex4 = orig_len;
21939 /* Save off the early/late values */
21940 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
21941 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
21942 log.u_bbr.bw_inuse = rack_get_bw(rack);
21943 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw;
21944 log.u_bbr.flex8 = 0;
21945 if (rsm) {
21946 if (rsm->r_flags & RACK_RWND_COLLAPSED) {
21947 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm);
21948 counter_u64_add(rack_collapsed_win_rxt, 1);
21949 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start));
21951 if (doing_tlp)
21952 log.u_bbr.flex8 = 2;
21953 else
21954 log.u_bbr.flex8 = 1;
21955 } else {
21956 if (doing_tlp)
21957 log.u_bbr.flex8 = 3;
21959 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
21960 log.u_bbr.flex7 = mark;
21961 log.u_bbr.flex7 <<= 8;
21962 log.u_bbr.flex7 |= pass;
21963 log.u_bbr.pkts_out = tp->t_maxseg;
21964 log.u_bbr.timeStamp = cts;
21965 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
21966 if (rsm && (rsm->r_rtr_cnt > 0)) {
21968 * When we have a retransmit we want to log the
21969 * burst at send and flight at send from before.
21971 log.u_bbr.flex5 = rsm->r_fas;
21972 log.u_bbr.bbr_substate = rsm->r_bas;
21973 } else {
21975 * For new transmits we log the inflight again in flex5, as
21976 * well as the number of segments in our send in the
21977 * substate field.
21979 log.u_bbr.flex5 = log.u_bbr.inflight;
21980 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz);
21982 log.u_bbr.lt_epoch = cwnd_to_use;
21983 log.u_bbr.delivered = sendalot;
21984 log.u_bbr.rttProp = (uintptr_t)rsm;
21985 log.u_bbr.pkt_epoch = __LINE__;
21986 if (rsm) {
21987 log.u_bbr.delRate = rsm->r_flags;
21988 log.u_bbr.delRate <<= 31;
21989 log.u_bbr.delRate |= rack->r_must_retran;
21990 log.u_bbr.delRate <<= 1;
21991 log.u_bbr.delRate |= (sack_rxmit & 0x00000001);
21992 } else {
21993 log.u_bbr.delRate = rack->r_must_retran;
21994 log.u_bbr.delRate <<= 1;
21995 log.u_bbr.delRate |= (sack_rxmit & 0x00000001);
21997 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
21998 len, &log, false, NULL, __func__, __LINE__, &tv);
21999 } else
22000 lgb = NULL;
22003 * Fill in IP length and desired time to live and send to IP level.
22004 * There should be a better way to handle ttl and tos; we could keep
22005 * them in the template, but need a way to checksum without them.
22008 * m->m_pkthdr.len should have been set before cksum calculation,
22009 * because in6_cksum() needs it.
22011 #ifdef INET6
22012 if (isipv6) {
22014 * we separately set hoplimit for every segment, since the
22015 * user might want to change the value via setsockopt. Also,
22016 * desired default hop limit might be changed via Neighbor
22017 * Discovery.
22019 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL);
22022 * Set the packet size here for the benefit of DTrace
22023 * probes. ip6_output() will set it properly; it's supposed
22024 * to include the option header lengths as well.
22026 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
22028 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
22029 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
22030 else
22031 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
22033 if (tp->t_state == TCPS_SYN_SENT)
22034 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
22036 TCP_PROBE5(send, NULL, tp, ip6, tp, th);
22037 /* TODO: IPv6 IP6TOS_ECT bit on */
22038 error = ip6_output(m,
22039 inp->in6p_outputopts,
22040 &inp->inp_route6,
22041 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0),
22042 NULL, NULL, inp);
22044 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
22045 mtu = inp->inp_route6.ro_nh->nh_mtu;
22047 #endif /* INET6 */
22048 #if defined(INET) && defined(INET6)
22049 else
22050 #endif
22051 #ifdef INET
22053 ip->ip_len = htons(m->m_pkthdr.len);
22054 #ifdef INET6
22055 if (inp->inp_vflag & INP_IPV6PROTO)
22056 ip->ip_ttl = in6_selecthlim(inp, NULL);
22057 #endif /* INET6 */
22058 rack->r_ctl.fsb.hoplimit = ip->ip_ttl;
22060 * If we do path MTU discovery, then we set DF on every
22061 * packet. This might not be the best thing to do according
22062 * to RFC3390 Section 2. However the tcp hostcache mitigates
22063 * the problem so it affects only the first tcp connection
22064 * with a host.
22066 * NB: Don't set DF on small MTU/MSS to have a safe
22067 * fallback.
22069 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
22070 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
22071 if (tp->t_port == 0 || len < V_tcp_minmss) {
22072 ip->ip_off |= htons(IP_DF);
22074 } else {
22075 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
22078 if (tp->t_state == TCPS_SYN_SENT)
22079 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
22081 TCP_PROBE5(send, NULL, tp, ip, tp, th);
22083 error = ip_output(m,
22084 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
22085 inp->inp_options,
22086 #else
22087 NULL,
22088 #endif
22089 &inp->inp_route,
22090 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
22091 inp);
22092 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
22093 mtu = inp->inp_route.ro_nh->nh_mtu;
22095 #endif /* INET */
22096 if (lgb) {
22097 lgb->tlb_errno = error;
22098 lgb = NULL;
22101 out:
22103 * In transmit state, time the transmission and arrange for the
22104 * retransmit. In persist state, just set snd_max.
22106 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error,
22107 rack_to_usec_ts(&tv),
22108 rsm, add_flag, s_mb, s_moff, hw_tls, segsiz);
22109 if (error == 0) {
22110 if (add_flag & RACK_IS_PCM) {
22111 /* We just launched a PCM */
22112 /* rrs here log */
22113 rack->pcm_in_progress = 1;
22114 rack->pcm_needed = 0;
22115 rack_log_pcm(rack, 7, len, rack->r_ctl.pcm_max_seg, add_flag);
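/*
 * Long-term bandwidth (lt_bw) accounting for new data: the first new send
 * starts the clock, and once the unacknowledged span nears half the
 * sequence space the acked bytes (snd_una - lt_seq) and elapsed time are
 * folded in early so the 32-bit sequence arithmetic never wraps.
 */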
22117 if (rsm == NULL) {
22118 if (rack->lt_bw_up == 0) {
22119 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv);
22120 rack->r_ctl.lt_seq = tp->snd_una;
22121 rack->lt_bw_up = 1;
22122 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) {
22124 * Need to record what we have since we are
22125 * approaching seq wrap.
22127 uint64_t tmark;
22129 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq);
22130 rack->r_ctl.lt_seq = tp->snd_una;
22131 tmark = tcp_get_u64_usecs(&tv);
22132 if (tmark > rack->r_ctl.lt_timemark) {
22133 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark);
22134 rack->r_ctl.lt_timemark = tmark;
22138 rack->forced_ack = 0; /* If we send something zap the FA flag */
22139 counter_u64_add(rack_total_bytes, len);
22140 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls);
22141 if (rsm && doing_tlp) {
22142 rack->rc_last_sent_tlp_past_cumack = 0;
22143 rack->rc_last_sent_tlp_seq_valid = 1;
22144 rack->r_ctl.last_sent_tlp_seq = rsm->r_start;
22145 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start;
22147 if (rack->rc_hw_nobuf) {
22148 rack->rc_hw_nobuf = 0;
22149 rack->r_ctl.rc_agg_delayed = 0;
22150 rack->r_early = 0;
22151 rack->r_late = 0;
22152 rack->r_ctl.rc_agg_early = 0;
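/*
 * Record which phase this send belongs to, seemingly for the goodput (gp)
 * measurement: a non-TLP retransmission, congestion avoidance (cwnd above
 * ssthresh), or slow start.
 */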
22154 if (rsm && (doing_tlp == 0)) {
22155 /* Set we retransmitted */
22156 rack->rc_gp_saw_rec = 1;
22157 } else {
22158 if (cwnd_to_use > tp->snd_ssthresh) {
22159 /* Set we sent in CA */
22160 rack->rc_gp_saw_ca = 1;
22161 } else {
22162 /* Set we sent in SS */
22163 rack->rc_gp_saw_ss = 1;
22166 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
22167 (tp->t_flags & TF_SACK_PERMIT) &&
22168 tp->rcv_numsacks > 0)
22169 tcp_clean_dsack_blocks(tp);
22170 tot_len_this_send += len;
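/*
 * rack_out_size[] histograms sends by segment count: pure ACKs (len == 0)
 * go to TCP_MSS_ACCT_SNDACK, data sends index by len/segsiz offset by 3
 * (the low slots appear to be reserved for special accounting), and
 * anything at or beyond TCP_MSS_ACCT_ATIMER collapses into the last bucket.
 */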
22171 if (len == 0) {
22172 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1);
22173 } else {
22174 int idx;
22176 idx = (len / segsiz) + 3;
22177 if (idx >= TCP_MSS_ACCT_ATIMER)
22178 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
22179 else
22180 counter_u64_add(rack_out_size[idx], 1);
22183 if ((rack->rack_no_prr == 0) &&
22184 sub_from_prr &&
22185 (error == 0)) {
22186 if (rack->r_ctl.rc_prr_sndcnt >= len)
22187 rack->r_ctl.rc_prr_sndcnt -= len;
22188 else
22189 rack->r_ctl.rc_prr_sndcnt = 0;
22191 sub_from_prr = 0;
22192 if (doing_tlp) {
22193 /* Make sure the TLP is added */
22194 add_flag |= RACK_TLP;
22195 } else if (rsm) {
22196 /* If it's a resend without TLP then it must not have the flag */
22197 rsm->r_flags &= ~RACK_TLP;
22201 if ((error == 0) &&
22202 (len > 0) &&
22203 (tp->snd_una == tp->snd_max))
22204 rack->r_ctl.rc_tlp_rxt_last_time = cts;
22208 * This block is not associated with the above error == 0 test.
22209 * It is used to advance snd_max if we have a new transmit.
22211 tcp_seq startseq = tp->snd_max;
22214 if (rsm && (doing_tlp == 0))
22215 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start;
22216 if (error)
22217 /* We don't log or do anything with errors */
22218 goto nomore;
22219 if (doing_tlp == 0) {
22220 if (rsm == NULL) {
22222 * Not a retransmission of some
22223 * sort, new data is going out so
22224 * clear our TLP count and flag.
22226 rack->rc_tlp_in_progress = 0;
22227 rack->r_ctl.rc_tlp_cnt_out = 0;
22229 } else {
22231 * We have just sent a TLP, mark that it is true
22232 * and make sure our in progress is set so we
22233 * continue to check the count.
22235 rack->rc_tlp_in_progress = 1;
22236 rack->r_ctl.rc_tlp_cnt_out++;
22239 * If we are retransmitting we are done, snd_max
22240 * does not get updated.
22242 if (sack_rxmit)
22243 goto nomore;
22244 if ((tp->snd_una == tp->snd_max) && (len > 0)) {
22246 * Update the time we just added data since
22247 * nothing was outstanding.
22249 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
22250 tp->t_acktime = ticks;
22253 * Now for special SYN/FIN handling.
22255 if (flags & (TH_SYN | TH_FIN)) {
22256 if ((flags & TH_SYN) &&
22257 ((tp->t_flags & TF_SENTSYN) == 0)) {
22258 tp->snd_max++;
22259 tp->t_flags |= TF_SENTSYN;
22261 if ((flags & TH_FIN) &&
22262 ((tp->t_flags & TF_SENTFIN) == 0)) {
22263 tp->snd_max++;
22264 tp->t_flags |= TF_SENTFIN;
22267 tp->snd_max += len;
22268 if (rack->rc_new_rnd_needed) {
22269 rack_new_round_starts(tp, rack, tp->snd_max);
22272 * Time this transmission if not a retransmission and
22273 * not currently timing anything.
22274 * This is only relevant in case of switching back to
22275 * the base stack.
22277 if (tp->t_rtttime == 0) {
22278 tp->t_rtttime = ticks;
22279 tp->t_rtseq = startseq;
22280 KMOD_TCPSTAT_INC(tcps_segstimed);
22282 if (len &&
22283 ((tp->t_flags & TF_GPUTINPROG) == 0))
22284 rack_start_gp_measurement(tp, rack, startseq, sb_offset);
22286 * If we are doing FO (fast output) we need to update the mbuf position and
22287 * subtract what we just sent; this happens when the peer sends us duplicate
22288 * information and we thus want to send a DSACK.
22290 * XXXRRS: This brings to mind a question: when we send a DSACK block, is TSO
22291 * turned off? If not then we are going to echo multiple DSACK blocks
22292 * out (with the TSO), which we should not be doing.
22294 if (rack->r_fast_output && len) {
22295 if (rack->r_ctl.fsb.left_to_send > len)
22296 rack->r_ctl.fsb.left_to_send -= len;
22297 else
22298 rack->r_ctl.fsb.left_to_send = 0;
22299 if (rack->r_ctl.fsb.left_to_send < segsiz)
22300 rack->r_fast_output = 0;
22301 if (rack->r_fast_output) {
22302 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
22303 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
22304 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m);
22307 if (rack_pcm_blast == 0) {
22308 if ((orig_len > len) &&
22309 (add_flag & RACK_IS_PCM) &&
22310 (len < pace_max_seg) &&
22311 ((pace_max_seg - len) > segsiz)) {
22313 * We are doing a PCM measurement and we did
22314 * not get enough data in the TSO to meet the
22315 * burst requirement.
22317 uint32_t n_len;
22319 n_len = (orig_len - len);
22320 orig_len -= len;
22321 pace_max_seg -= len;
22322 len = n_len;
22323 sb_offset = tp->snd_max - tp->snd_una;
22324 /* Re-lock for the next spin */
22325 SOCK_SENDBUF_LOCK(so);
22326 goto send;
22328 } else {
22329 if ((orig_len > len) &&
22330 (add_flag & RACK_IS_PCM) &&
22331 ((orig_len - len) > segsiz)) {
22333 * We are doing a PCM measurement and we did
22334 * not get enough data in the TSO to meet the
22335 * burst requirement.
22337 uint32_t n_len;
22339 n_len = (orig_len - len);
22340 orig_len -= len;
22341 len = n_len;
22342 sb_offset = tp->snd_max - tp->snd_una;
22343 /* Re-lock for the next spin */
22344 SOCK_SENDBUF_LOCK(so);
22345 goto send;
22349 nomore:
22350 if (error) {
22351 rack->r_ctl.rc_agg_delayed = 0;
22352 rack->r_early = 0;
22353 rack->r_late = 0;
22354 rack->r_ctl.rc_agg_early = 0;
22355 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */
22357 * Failures do not advance the seq counter above. For the
22358 * case of ENOBUFS we will fall out and retry shortly (at least 10 ms) with
22359 * the hpts. Everything else will just have to retransmit
22360 * with the timer.
22362 * In any case, we do not want to loop around for another
22363 * send without a good reason.
22365 sendalot = 0;
22366 switch (error) {
22367 case EPERM:
22368 case EACCES:
22369 tp->t_softerror = error;
22370 #ifdef TCP_ACCOUNTING
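/*
 * TCP_ACCOUNTING: charge the cycles spent in this call (current cycle
 * count minus the ts_val snapshot taken earlier in rack_output()) to the
 * SND_OUT_FAIL bucket and bump its counter before returning the error.
 */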
22371 crtsc = get_cyclecount();
22372 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22373 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
22375 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22376 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
22378 sched_unpin();
22379 #endif
22380 return (error);
22381 case ENOBUFS:
22383 * Pace us right away to retry in some
22384 * time
22386 if (rack->r_ctl.crte != NULL) {
22387 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF);
22388 if (tcp_bblogging_on(rack->rc_tp))
22389 rack_log_queue_level(tp, rack, len, &tv, cts);
22390 } else
22391 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF);
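/*
 * ENOBUFS backoff: retry after roughly (1 + rc_enobuf) ms with a 10 ms
 * floor, rc_enobuf saturating at 0x7f, so repeated buffer exhaustion
 * pushes the hpts retry out linearly.
 */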
22392 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
22393 if (rack->rc_enobuf < 0x7f)
22394 rack->rc_enobuf++;
22395 if (slot < (10 * HPTS_USEC_IN_MSEC))
22396 slot = 10 * HPTS_USEC_IN_MSEC;
22397 if (rack->r_ctl.crte != NULL) {
22398 counter_u64_add(rack_saw_enobuf_hw, 1);
22399 tcp_rl_log_enobuf(rack->r_ctl.crte);
22401 counter_u64_add(rack_saw_enobuf, 1);
22402 goto enobufs;
22403 case EMSGSIZE:
22405 * For some reason the interface we used initially
22406 * to send segments changed to another or lowered
22407 * its MTU. If TSO was active we either got an
22408 * interface without TSO capabilities or TSO was
22409 * turned off. If we obtained mtu from ip_output()
22410 * then update it and try again.
22412 if (tso)
22413 tp->t_flags &= ~TF_TSO;
22414 if (mtu != 0) {
22415 int saved_mtu;
22417 saved_mtu = tp->t_maxseg;
22418 tcp_mss_update(tp, -1, mtu, NULL, NULL);
22419 if (saved_mtu > tp->t_maxseg) {
22420 goto again;
22423 slot = 10 * HPTS_USEC_IN_MSEC;
22424 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
22425 #ifdef TCP_ACCOUNTING
22426 crtsc = get_cyclecount();
22427 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22428 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
22430 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22431 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
22433 sched_unpin();
22434 #endif
22435 return (error);
22436 case ENETUNREACH:
22437 counter_u64_add(rack_saw_enetunreach, 1);
22438 /* FALLTHROUGH */
22439 case EHOSTDOWN:
22440 case EHOSTUNREACH:
22441 case ENETDOWN:
22442 if (TCPS_HAVERCVDSYN(tp->t_state)) {
22443 tp->t_softerror = error;
22444 error = 0;
22446 /* FALLTHROUGH */
22447 default:
22448 slot = 10 * HPTS_USEC_IN_MSEC;
22449 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
22450 #ifdef TCP_ACCOUNTING
22451 crtsc = get_cyclecount();
22452 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22453 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
22455 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22456 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
22458 sched_unpin();
22459 #endif
22460 return (error);
22462 } else {
22463 rack->rc_enobuf = 0;
22464 if (IN_FASTRECOVERY(tp->t_flags) && rsm)
22465 rack->r_ctl.retran_during_recovery += len;
22467 KMOD_TCPSTAT_INC(tcps_sndtotal);
22470 * Data sent (as far as we can tell). If this advertises a larger
22471 * window than any other segment, then remember the size of the
22472 * advertised window. Any pending ACK has now been sent.
22474 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
22475 tp->rcv_adv = tp->rcv_nxt + recwin;
22477 tp->last_ack_sent = tp->rcv_nxt;
22478 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
22479 enobufs:
22480 if (sendalot) {
22481 /* Do we need to turn off sendalot? */
22482 if (pace_max_seg &&
22483 (tot_len_this_send >= pace_max_seg)) {
22484 /* We hit our max. */
22485 sendalot = 0;
22488 if ((error == 0) && (flags & TH_FIN))
22489 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN);
22490 if (flags & TH_RST) {
22492 * We don't send again after sending a RST.
22494 slot = 0;
22495 sendalot = 0;
22496 if (error == 0)
22497 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
22498 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) {
22500 * Get our pacing rate; if an error
22501 * occurred in sending (ENOBUFS) we would
22502 * hit the else if with slot preset. Other
22503 * errors return.
22505 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz, __LINE__);
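/*
 * slot is the pacing delay in microseconds; it is handed to
 * rack_start_hpts_timer() at skip_all_send below to schedule the next send.
 */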
22507 /* We have sent clear the flag */
22508 rack->r_ent_rec_ns = 0;
22509 if (rack->r_must_retran) {
22510 if (rsm) {
22511 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
22512 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
22514 * We have retransmitted all.
22516 rack->r_must_retran = 0;
22517 rack->r_ctl.rc_out_at_rto = 0;
22519 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
22521 * Sending new data will also kill
22522 * the loop.
22524 rack->r_must_retran = 0;
22525 rack->r_ctl.rc_out_at_rto = 0;
22528 rack->r_ctl.fsb.recwin = recwin;
22529 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) &&
22530 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
22532 * We hit an RTO and have now passed the snd_max recorded at the RTO;
22533 * clear all the WAS flags.
22535 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY);
22537 if (slot) {
22538 /* set the rack tcb into the slot N */
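/*
 * Fast output (fsb) is only armed for a clean send: no error, no SYN/FIN,
 * not a retransmission, no IP options, established state, not in recovery
 * or forced-retransmit mode, at least one more full segment still queued
 * (orig_len - len >= segsiz), and at most a timestamp TCP option.
 */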
22539 if ((error == 0) &&
22540 rack_use_rfo &&
22541 ((flags & (TH_SYN|TH_FIN)) == 0) &&
22542 (rsm == NULL) &&
22543 (ipoptlen == 0) &&
22544 rack->r_fsb_inited &&
22545 TCPS_HAVEESTABLISHED(tp->t_state) &&
22546 ((IN_RECOVERY(tp->t_flags)) == 0) &&
22547 (rack->r_must_retran == 0) &&
22548 ((tp->t_flags & TF_NEEDFIN) == 0) &&
22549 (len > 0) && (orig_len > 0) &&
22550 (orig_len > len) &&
22551 ((orig_len - len) >= segsiz) &&
22552 ((optlen == 0) ||
22553 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
22554 /* We can send at least one more MSS using our fsb */
22555 rack_setup_fast_output(tp, rack, sb, len, orig_len,
22556 segsiz, pace_max_seg, hw_tls, flags);
22557 } else
22558 rack->r_fast_output = 0;
22559 rack_log_fsb(rack, tp, so, flags,
22560 ipoptlen, orig_len, len, error,
22561 (rsm == NULL), optlen, __LINE__, 2);
22562 } else if (sendalot) {
22563 int ret;
22565 sack_rxmit = 0;
22566 if ((error == 0) &&
22567 rack_use_rfo &&
22568 ((flags & (TH_SYN|TH_FIN)) == 0) &&
22569 (rsm == NULL) &&
22570 (ipoptlen == 0) &&
22571 (rack->r_must_retran == 0) &&
22572 rack->r_fsb_inited &&
22573 TCPS_HAVEESTABLISHED(tp->t_state) &&
22574 ((IN_RECOVERY(tp->t_flags)) == 0) &&
22575 ((tp->t_flags & TF_NEEDFIN) == 0) &&
22576 (len > 0) && (orig_len > 0) &&
22577 (orig_len > len) &&
22578 ((orig_len - len) >= segsiz) &&
22579 ((optlen == 0) ||
22580 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
22581 /* we can use fast_output for more */
22582 rack_setup_fast_output(tp, rack, sb, len, orig_len,
22583 segsiz, pace_max_seg, hw_tls, flags);
22584 if (rack->r_fast_output) {
22585 error = 0;
22586 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
22587 if (ret >= 0)
22588 return (ret);
22589 else if (error)
22590 goto nomore;
22594 goto again;
22596 skip_all_send:
22597 /* Ensure that when we leave, snd_nxt points to the top (snd_max) */
22598 if (SEQ_GT(tp->snd_max, tp->snd_nxt))
22599 tp->snd_nxt = tp->snd_max;
22600 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0);
22601 #ifdef TCP_ACCOUNTING
22602 crtsc = get_cyclecount() - ts_val;
22603 if (tot_len_this_send) {
22604 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22605 tp->tcp_cnt_counters[SND_OUT_DATA]++;
22607 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22608 tp->tcp_proc_time[SND_OUT_DATA] += crtsc;
22610 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22611 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz);
22613 } else {
22614 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22615 tp->tcp_cnt_counters[SND_OUT_ACK]++;
22617 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22618 tp->tcp_proc_time[SND_OUT_ACK] += crtsc;
22621 sched_unpin();
22622 #endif
22623 if (error == ENOBUFS)
22624 error = 0;
22625 return (error);
22628 static void
22629 rack_update_seg(struct tcp_rack *rack)
22631 uint32_t orig_val;
22633 orig_val = rack->r_ctl.rc_pace_max_segs;
22634 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
22635 if (orig_val != rack->r_ctl.rc_pace_max_segs)
22636 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0);
22639 static void
22640 rack_mtu_change(struct tcpcb *tp)
22643 * The MSS may have changed
22645 struct tcp_rack *rack;
22646 struct rack_sendmap *rsm;
22648 rack = (struct tcp_rack *)tp->t_fb_ptr;
22649 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) {
22651 * The MTU has changed we need to resend everything
22652 * since all we have sent is lost. We first fix
22653 * up the mtu though.
22655 rack_set_pace_segments(tp, rack, __LINE__, NULL);
22656 /* We treat this like a full retransmit timeout without the cwnd adjustment */
22657 rack_remxt_tmr(tp);
22658 rack->r_fast_output = 0;
22659 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp,
22660 rack->r_ctl.rc_sacked);
22661 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
22662 rack->r_must_retran = 1;
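/*
 * r_must_retran pairs with rc_snd_max_at_rto: it stays set until
 * retransmissions (or new sends) reach the snd_max recorded here, where
 * the post-send code in rack_output() clears it again.
 */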
22663 /* Mark all inflight to needing to be rxt'd */
22664 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
22665 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG);
22668 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
22669 /* We don't use snd_nxt to retransmit */
22670 tp->snd_nxt = tp->snd_max;
22673 static int
22674 rack_set_dgp(struct tcp_rack *rack)
22676 if (rack->dgp_on == 1)
22677 return(0);
22678 if ((rack->use_fixed_rate == 1) &&
22679 (rack->rc_always_pace == 1)) {
22681 * We are already pacing another
22682 * way.
22684 return (EBUSY);
22686 if (rack->rc_always_pace == 1) {
22687 rack_remove_pacing(rack);
22689 if (tcp_incr_dgp_pacing_cnt() == 0)
22690 return (ENOSPC);
22691 rack->r_ctl.pacing_method |= RACK_DGP_PACING;
22692 rack->rc_fillcw_apply_discount = 0;
22693 rack->dgp_on = 1;
22694 rack->rc_always_pace = 1;
22695 rack->rc_pace_dnd = 1;
22696 rack->use_fixed_rate = 0;
22697 if (rack->gp_ready)
22698 rack_set_cc_pacing(rack);
22699 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
22700 rack->rack_attempt_hdwr_pace = 0;
22701 /* rxt settings */
22702 rack->full_size_rxt = 1;
22703 rack->shape_rxt_to_pacing_min = 0;
22704 /* cmpack=1 */
22705 rack->r_use_cmp_ack = 1;
22706 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
22707 rack->r_use_cmp_ack)
22708 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
22709 /* scwnd=1 */
22710 rack->rack_enable_scwnd = 1;
22711 /* dynamic=100 */
22712 rack->rc_gp_dyn_mul = 1;
22713 /* gp_inc_ca */
22714 rack->r_ctl.rack_per_of_gp_ca = 100;
22715 /* rrr_conf=3 */
22716 rack->r_rr_config = 3;
22717 /* npush=2 */
22718 rack->r_ctl.rc_no_push_at_mrtt = 2;
22719 /* fillcw=1 */
22720 rack->rc_pace_to_cwnd = 1;
22721 rack->rc_pace_fill_if_rttin_range = 0;
22722 rack->rtt_limit_mul = 0;
22723 /* noprr=1 */
22724 rack->rack_no_prr = 1;
22725 /* lscwnd=1 */
22726 rack->r_limit_scw = 1;
22727 /* gp_inc_rec */
22728 rack->r_ctl.rack_per_of_gp_rec = 90;
22729 return (0);
22732 static int
22733 rack_set_profile(struct tcp_rack *rack, int prof)
22735 int err = EINVAL;
22736 if (prof == 1) {
22738 * Profile 1 is "standard" DGP. It ignores
22739 * client buffer level.
22741 err = rack_set_dgp(rack);
22742 if (err)
22743 return (err);
22744 } else if (prof == 6) {
22745 err = rack_set_dgp(rack);
22746 if (err)
22747 return (err);
22749 * Profile 6 tweaks DGP so that it will apply to
22750 * fill-cw the same settings that profile 5 does
22751 * to replace DGP. It then gets the max(dgp-rate, fillcw(discounted)).
22753 rack->rc_fillcw_apply_discount = 1;
22754 } else if (prof == 0) {
22755 /* This changes things back to the default settings */
22756 if (rack->rc_always_pace == 1) {
22757 rack_remove_pacing(rack);
22758 } else {
22759 /* Make sure any stray flags are off */
22760 rack->dgp_on = 0;
22761 rack->rc_hybrid_mode = 0;
22762 rack->use_fixed_rate = 0;
22764 err = 0;
22765 if (rack_fill_cw_state)
22766 rack->rc_pace_to_cwnd = 1;
22767 else
22768 rack->rc_pace_to_cwnd = 0;
22770 if (rack_pace_every_seg && tcp_can_enable_pacing()) {
22771 rack->r_ctl.pacing_method |= RACK_REG_PACING;
22772 rack->rc_always_pace = 1;
22773 if (rack->rack_hibeta)
22774 rack_set_cc_pacing(rack);
22775 } else
22776 rack->rc_always_pace = 0;
22777 if (rack_dsack_std_based & 0x1) {
22778 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */
22779 rack->rc_rack_tmr_std_based = 1;
22781 if (rack_dsack_std_based & 0x2) {
22782 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */
22783 rack->rc_rack_use_dsack = 1;
22785 if (rack_use_cmp_acks)
22786 rack->r_use_cmp_ack = 1;
22787 else
22788 rack->r_use_cmp_ack = 0;
22789 if (rack_disable_prr)
22790 rack->rack_no_prr = 1;
22791 else
22792 rack->rack_no_prr = 0;
22793 if (rack_gp_no_rec_chg)
22794 rack->rc_gp_no_rec_chg = 1;
22795 else
22796 rack->rc_gp_no_rec_chg = 0;
22797 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) {
22798 rack->r_mbuf_queue = 1;
22799 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
22800 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
22801 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
22802 } else {
22803 rack->r_mbuf_queue = 0;
22804 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
22806 if (rack_enable_shared_cwnd)
22807 rack->rack_enable_scwnd = 1;
22808 else
22809 rack->rack_enable_scwnd = 0;
22810 if (rack_do_dyn_mul) {
22811 /* When dynamic adjustment is on CA needs to start at 100% */
22812 rack->rc_gp_dyn_mul = 1;
22813 if (rack_do_dyn_mul >= 100)
22814 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
22815 } else {
22816 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
22817 rack->rc_gp_dyn_mul = 0;
22819 rack->r_rr_config = 0;
22820 rack->r_ctl.rc_no_push_at_mrtt = 0;
22821 rack->rc_pace_fill_if_rttin_range = 0;
22822 rack->rtt_limit_mul = 0;
22824 if (rack_enable_hw_pacing)
22825 rack->rack_hdw_pace_ena = 1;
22826 else
22827 rack->rack_hdw_pace_ena = 0;
22828 if (rack_disable_prr)
22829 rack->rack_no_prr = 1;
22830 else
22831 rack->rack_no_prr = 0;
22832 if (rack_limits_scwnd)
22833 rack->r_limit_scw = 1;
22834 else
22835 rack->r_limit_scw = 0;
22836 rack_init_retransmit_value(rack, rack_rxt_controls);
22837 err = 0;
22839 return (err);
22842 static int
22843 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval)
22845 struct deferred_opt_list *dol;
22847 dol = malloc(sizeof(struct deferred_opt_list),
22848 M_TCPDO, M_NOWAIT|M_ZERO);
22849 if (dol == NULL) {
22851 * No space yikes -- fail out..
22853 return (0);
22855 dol->optname = sopt_name;
22856 dol->optval = loptval;
22857 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next);
22858 return (1);
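/*
 * Queued options are replayed later by rack_apply_deferred_options();
 * note the return convention: 1 means the option was queued, 0 means the
 * allocation failed.
 */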
22861 static int
22862 process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid)
22864 #ifdef TCP_REQUEST_TRK
22865 struct tcp_sendfile_track *sft;
22866 struct timeval tv;
22867 tcp_seq seq;
22868 int err;
22870 microuptime(&tv);
22872 /* Make sure no fixed rate is on */
22873 rack->use_fixed_rate = 0;
22874 rack->r_ctl.rc_fixed_pacing_rate_rec = 0;
22875 rack->r_ctl.rc_fixed_pacing_rate_ca = 0;
22876 rack->r_ctl.rc_fixed_pacing_rate_ss = 0;
22877 /* Now allocate or find our entry that will have these settings */
22878 sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0);
22879 if (sft == NULL) {
22880 rack->rc_tp->tcp_hybrid_error++;
22881 /* no space, where would it have gone? */
22882 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc;
22883 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_ROOM, __LINE__, 0);
22884 return (ENOSPC);
22886 /* mask our internal flags */
22887 hybrid->hybrid_flags &= TCP_HYBRID_PACING_USER_MASK;
22888 /* The seq will be snd_una + everything in the buffer */
22889 seq = sft->start_seq;
22890 if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) {
22891 /* Disabling hybrid pacing */
22892 if (rack->rc_hybrid_mode) {
22893 rack_set_profile(rack, 0);
22894 rack->rc_tp->tcp_hybrid_stop++;
22896 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_TURNED_OFF, __LINE__, 0);
22897 return (0);
22899 if (rack->dgp_on == 0) {
22901 * If we have not yet turned DGP on, do so
22902 * now, setting pure DGP mode with no buffer-level
22903 * response.
22905 if ((err = rack_set_profile(rack, 1)) != 0){
22906 /* Failed to turn pacing on */
22907 rack->rc_tp->tcp_hybrid_error++;
22908 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_NO_PACING, __LINE__, 0);
22909 return (err);
22913 * Now we must switch to hybrid mode as well which also
22914 * means moving to regular pacing.
22916 if (rack->rc_hybrid_mode == 0) {
22917 /* First time */
22918 if (tcp_can_enable_pacing()) {
22919 rack->r_ctl.pacing_method |= RACK_REG_PACING;
22920 rack->rc_hybrid_mode = 1;
22921 } else {
22922 return (ENOSPC);
22924 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) {
22926 * This should be true.
22928 tcp_dec_dgp_pacing_cnt();
22929 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING;
22932 /* Now set in our flags */
22933 sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET;
22934 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR)
22935 sft->cspr = hybrid->cspr;
22936 else
22937 sft->cspr = 0;
22938 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS)
22939 sft->hint_maxseg = hybrid->hint_maxseg;
22940 else
22941 sft->hint_maxseg = 0;
22942 rack->rc_tp->tcp_hybrid_start++;
22943 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_RULES_SET, __LINE__,0);
22944 return (0);
22945 #else
22946 return (ENOTSUP);
22947 #endif
22950 static int
22951 rack_stack_information(struct tcpcb *tp, struct stack_specific_info *si)
22953 /* We got a stack-specific-information (SSI) request; copy out what we track */
22954 si->bytes_transmitted = tp->t_sndbytes;
22955 si->bytes_retransmitted = tp->t_snd_rxt_bytes;
22956 return (0);
22959 static int
22960 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name,
22961 uint32_t optval, uint64_t loptval, struct tcp_hybrid_req *hybrid)
22964 struct epoch_tracker et;
22965 struct sockopt sopt;
22966 struct cc_newreno_opts opt;
22967 uint64_t val;
22968 int error = 0;
22969 uint16_t ca, ss;
22971 switch (sopt_name) {
22972 case TCP_RACK_SET_RXT_OPTIONS:
22973 if (optval <= 2) {
22974 rack_init_retransmit_value(rack, optval);
22975 } else {
22977 * You must send in 0, 1 or 2; all else is
22978 * invalid.
22980 error = EINVAL;
22982 break;
22983 case TCP_RACK_DSACK_OPT:
22984 RACK_OPTS_INC(tcp_rack_dsack_opt);
22985 if (optval & 0x1) {
22986 rack->rc_rack_tmr_std_based = 1;
22987 } else {
22988 rack->rc_rack_tmr_std_based = 0;
22990 if (optval & 0x2) {
22991 rack->rc_rack_use_dsack = 1;
22992 } else {
22993 rack->rc_rack_use_dsack = 0;
22995 rack_log_dsack_event(rack, 5, __LINE__, 0, 0);
22996 break;
22997 case TCP_RACK_PACING_DIVISOR:
22998 RACK_OPTS_INC(tcp_rack_pacing_divisor);
22999 if (optval == 0) {
23000 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor;
23001 } else {
23002 if (optval < RL_MIN_DIVISOR)
23003 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR;
23004 else
23005 rack->r_ctl.pace_len_divisor = optval;
23007 break;
23008 case TCP_RACK_HI_BETA:
23009 RACK_OPTS_INC(tcp_rack_hi_beta);
23010 if (optval > 0) {
23011 rack->rack_hibeta = 1;
23012 if ((optval >= 50) &&
23013 (optval <= 100)) {
23015 * User wants to set a custom beta.
23017 rack->r_ctl.saved_hibeta = optval;
23018 if (rack->rc_pacing_cc_set)
23019 rack_undo_cc_pacing(rack);
23020 rack->r_ctl.rc_saved_beta.beta = optval;
23022 if (rack->rc_pacing_cc_set == 0)
23023 rack_set_cc_pacing(rack);
23024 } else {
23025 rack->rack_hibeta = 0;
23026 if (rack->rc_pacing_cc_set)
23027 rack_undo_cc_pacing(rack);
23029 break;
23030 case TCP_RACK_PACING_BETA:
23031 error = EINVAL;
23032 break;
23033 case TCP_RACK_TIMER_SLOP:
23034 RACK_OPTS_INC(tcp_rack_timer_slop);
23035 rack->r_ctl.timer_slop = optval;
23036 if (rack->rc_tp->t_srtt) {
23038 * If we have an SRTT lets update t_rxtcur
23039 * to have the new slop.
23041 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
23042 rack_rto_min, rack_rto_max,
23043 rack->r_ctl.timer_slop);
23045 break;
23046 case TCP_RACK_PACING_BETA_ECN:
23047 RACK_OPTS_INC(tcp_rack_beta_ecn);
23048 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
23049 /* This only works for newreno. */
23050 error = EINVAL;
23051 break;
23053 if (rack->rc_pacing_cc_set) {
23055 * Set them into the real CC module;
23056 * what's in the rack pcb are the old values
23057 * to be used on restoral.
23059 sopt.sopt_dir = SOPT_SET;
23060 opt.name = CC_NEWRENO_BETA_ECN;
23061 opt.val = optval;
23062 if (CC_ALGO(tp)->ctl_output != NULL)
23063 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
23064 else
23065 error = ENOENT;
23066 } else {
23068 * Not pacing yet so set it into our local
23069 * rack pcb storage.
23071 rack->r_ctl.rc_saved_beta.beta_ecn = optval;
23072 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED;
23074 break;
23075 case TCP_DEFER_OPTIONS:
23076 RACK_OPTS_INC(tcp_defer_opt);
23077 if (optval) {
23078 if (rack->gp_ready) {
23079 /* Too late */
23080 error = EINVAL;
23081 break;
23083 rack->defer_options = 1;
23084 } else
23085 rack->defer_options = 0;
23086 break;
23087 case TCP_RACK_MEASURE_CNT:
23088 RACK_OPTS_INC(tcp_rack_measure_cnt);
23089 if (optval && (optval <= 0xff)) {
23090 rack->r_ctl.req_measurements = optval;
23091 } else
23092 error = EINVAL;
23093 break;
23094 case TCP_REC_ABC_VAL:
23095 RACK_OPTS_INC(tcp_rec_abc_val);
23096 if (optval > 0)
23097 rack->r_use_labc_for_rec = 1;
23098 else
23099 rack->r_use_labc_for_rec = 0;
23100 break;
23101 case TCP_RACK_ABC_VAL:
23102 RACK_OPTS_INC(tcp_rack_abc_val);
23103 if ((optval > 0) && (optval < 255))
23104 rack->rc_labc = optval;
23105 else
23106 error = EINVAL;
23107 break;
23108 case TCP_HDWR_UP_ONLY:
23109 RACK_OPTS_INC(tcp_pacing_up_only);
23110 if (optval)
23111 rack->r_up_only = 1;
23112 else
23113 rack->r_up_only = 0;
23114 break;
23115 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */
23116 RACK_OPTS_INC(tcp_fillcw_rate_cap);
23117 rack->r_ctl.fillcw_cap = loptval;
23118 break;
23119 case TCP_PACING_RATE_CAP:
23120 RACK_OPTS_INC(tcp_pacing_rate_cap);
23121 if ((rack->dgp_on == 1) &&
23122 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) {
23124 * If we are doing DGP we need to switch
23125 * to using the pacing limit.
23127 if (tcp_can_enable_pacing() == 0) {
23128 error = ENOSPC;
23129 break;
23132 * Now change up the flags and counts to be correct.
23134 rack->r_ctl.pacing_method |= RACK_REG_PACING;
23135 tcp_dec_dgp_pacing_cnt();
23136 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING;
23138 rack->r_ctl.bw_rate_cap = loptval;
23139 break;
23140 case TCP_HYBRID_PACING:
23141 if (hybrid == NULL) {
23142 error = EINVAL;
23143 break;
23145 if (rack->r_ctl.side_chan_dis_mask & HYBRID_DIS_MASK) {
23146 error = EPERM;
23147 break;
23149 error = process_hybrid_pacing(rack, hybrid);
23150 break;
23151 case TCP_SIDECHAN_DIS: /* URL:scodm */
23152 if (optval)
23153 rack->r_ctl.side_chan_dis_mask = optval;
23154 else
23155 rack->r_ctl.side_chan_dis_mask = 0;
23156 break;
23157 case TCP_RACK_PROFILE:
23158 RACK_OPTS_INC(tcp_profile);
23159 error = rack_set_profile(rack, optval);
23160 break;
23161 case TCP_USE_CMP_ACKS:
23162 RACK_OPTS_INC(tcp_use_cmp_acks);
23163 if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) {
23164 /* You can't turn it off once it's on! */
23165 error = EINVAL;
23166 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) {
23167 rack->r_use_cmp_ack = 1;
23168 rack->r_mbuf_queue = 1;
23169 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
23171 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
23172 tp->t_flags2 |= TF2_MBUF_ACKCMP;
23173 break;
23174 case TCP_SHARED_CWND_TIME_LIMIT:
23175 RACK_OPTS_INC(tcp_lscwnd);
23176 if (optval)
23177 rack->r_limit_scw = 1;
23178 else
23179 rack->r_limit_scw = 0;
23180 break;
23181 case TCP_RACK_DGP_IN_REC:
23182 error = EINVAL;
23183 break;
23184 case TCP_RACK_PACE_TO_FILL:
23185 RACK_OPTS_INC(tcp_fillcw);
23186 if (optval == 0)
23187 rack->rc_pace_to_cwnd = 0;
23188 else {
23189 rack->rc_pace_to_cwnd = 1;
23191 if ((optval >= rack_gp_rtt_maxmul) &&
23192 rack_gp_rtt_maxmul &&
23193 (optval < 0xf)) {
23194 rack->rc_pace_fill_if_rttin_range = 1;
23195 rack->rtt_limit_mul = optval;
23196 } else {
23197 rack->rc_pace_fill_if_rttin_range = 0;
23198 rack->rtt_limit_mul = 0;
23200 break;
23201 case TCP_RACK_NO_PUSH_AT_MAX:
23202 RACK_OPTS_INC(tcp_npush);
23203 if (optval == 0)
23204 rack->r_ctl.rc_no_push_at_mrtt = 0;
23205 else if (optval < 0xff)
23206 rack->r_ctl.rc_no_push_at_mrtt = optval;
23207 else
23208 error = EINVAL;
23209 break;
23210 case TCP_SHARED_CWND_ENABLE:
23211 RACK_OPTS_INC(tcp_rack_scwnd);
23212 if (optval == 0)
23213 rack->rack_enable_scwnd = 0;
23214 else
23215 rack->rack_enable_scwnd = 1;
23216 break;
23217 case TCP_RACK_MBUF_QUEUE:
23218 /* Now do we use the LRO mbuf-queue feature */
23219 RACK_OPTS_INC(tcp_rack_mbufq);
23220 if (optval || rack->r_use_cmp_ack)
23221 rack->r_mbuf_queue = 1;
23222 else
23223 rack->r_mbuf_queue = 0;
23224 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
23225 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
23226 else
23227 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
23228 break;
23229 case TCP_RACK_NONRXT_CFG_RATE:
23230 RACK_OPTS_INC(tcp_rack_cfg_rate);
23231 if (optval == 0)
23232 rack->rack_rec_nonrxt_use_cr = 0;
23233 else
23234 rack->rack_rec_nonrxt_use_cr = 1;
23235 break;
23236 case TCP_NO_PRR:
23237 RACK_OPTS_INC(tcp_rack_noprr);
23238 if (optval == 0)
23239 rack->rack_no_prr = 0;
23240 else if (optval == 1)
23241 rack->rack_no_prr = 1;
23242 else if (optval == 2)
23243 rack->no_prr_addback = 1;
23244 else
23245 error = EINVAL;
23246 break;
23247 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */
23248 if (optval > 0)
23249 rack->cspr_is_fcc = 1;
23250 else
23251 rack->cspr_is_fcc = 0;
23252 break;
23253 case TCP_TIMELY_DYN_ADJ:
23254 RACK_OPTS_INC(tcp_timely_dyn);
23255 if (optval == 0)
23256 rack->rc_gp_dyn_mul = 0;
23257 else {
23258 rack->rc_gp_dyn_mul = 1;
23259 if (optval >= 100) {
23261 * If the user sets something 100 or more
23262 * it's the gp_ca value.
23264 rack->r_ctl.rack_per_of_gp_ca = optval;
23267 break;
23268 case TCP_RACK_DO_DETECTION:
23269 error = EINVAL;
23270 break;
23271 case TCP_RACK_TLP_USE:
23272 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) {
23273 error = EINVAL;
23274 break;
23276 RACK_OPTS_INC(tcp_tlp_use);
23277 rack->rack_tlp_threshold_use = optval;
23278 break;
23279 case TCP_RACK_TLP_REDUCE:
23280 /* RACK TLP cwnd reduction (bool) */
23281 RACK_OPTS_INC(tcp_rack_tlp_reduce);
23282 rack->r_ctl.rc_tlp_cwnd_reduce = optval;
23283 break;
23284 /* Pacing related ones */
23285 case TCP_RACK_PACE_ALWAYS:
23287 * zero is old rack method, 1 is new
23288 * method using a pacing rate.
23290 RACK_OPTS_INC(tcp_rack_pace_always);
23291 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) {
23292 error = EPERM;
23293 break;
23295 if (optval > 0) {
23296 if (rack->rc_always_pace) {
23297 error = EALREADY;
23298 break;
23299 } else if (tcp_can_enable_pacing()) {
23300 rack->r_ctl.pacing_method |= RACK_REG_PACING;
23301 rack->rc_always_pace = 1;
23302 if (rack->rack_hibeta)
23303 rack_set_cc_pacing(rack);
23305 else {
23306 error = ENOSPC;
23307 break;
23309 } else {
23310 if (rack->rc_always_pace == 1) {
23311 rack_remove_pacing(rack);
23314 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
23315 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
23316 else
23317 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
23318 /* A rate may be set irate or other, if so set seg size */
23319 rack_update_seg(rack);
23320 break;
23321 case TCP_BBR_RACK_INIT_RATE:
23322 RACK_OPTS_INC(tcp_initial_rate);
23323 val = optval;
23324 /* Change from kbits per second to bytes per second */
23325 val *= 1000;
23326 val /= 8;
23327 rack->r_ctl.init_rate = val;
23328 if (rack->rc_always_pace)
23329 rack_update_seg(rack);
23330 break;
23331 case TCP_BBR_IWINTSO:
23332 error = EINVAL;
23333 break;
23334 case TCP_RACK_FORCE_MSEG:
23335 RACK_OPTS_INC(tcp_rack_force_max_seg);
23336 if (optval)
23337 rack->rc_force_max_seg = 1;
23338 else
23339 rack->rc_force_max_seg = 0;
23340 break;
23341 case TCP_RACK_PACE_MIN_SEG:
23342 RACK_OPTS_INC(tcp_rack_min_seg);
23343 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval);
23344 rack_set_pace_segments(tp, rack, __LINE__, NULL);
23345 break;
23346 case TCP_RACK_PACE_MAX_SEG:
23347 /* Max segments size in a pace in bytes */
23348 RACK_OPTS_INC(tcp_rack_max_seg);
23349 if ((rack->dgp_on == 1) &&
23350 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) {
23352 * If we set a max-seg and are doing DGP then
23353 * we now fall under the pacing limits not the
23354 * DGP ones.
23356 if (tcp_can_enable_pacing() == 0) {
23357 error = ENOSPC;
23358 break;
23361 * Now change up the flags and counts to be correct.
23363 rack->r_ctl.pacing_method |= RACK_REG_PACING;
23364 tcp_dec_dgp_pacing_cnt();
23365 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING;
23367 if (optval <= MAX_USER_SET_SEG)
23368 rack->rc_user_set_max_segs = optval;
23369 else
23370 rack->rc_user_set_max_segs = MAX_USER_SET_SEG;
23371 rack_set_pace_segments(tp, rack, __LINE__, NULL);
23372 break;
23373 case TCP_RACK_PACE_RATE_REC:
23374 /* Set the fixed pacing rate in Bytes per second for recovery */
23375 RACK_OPTS_INC(tcp_rack_pace_rate_rec);
23376 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) {
23377 error = EPERM;
23378 break;
23380 if (rack->dgp_on) {
23382 * We are already pacing another
23383 * way.
23385 error = EBUSY;
23386 break;
23388 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
23389 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
23390 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
23391 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
23392 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
23393 rack->use_fixed_rate = 1;
23394 if (rack->rack_hibeta)
23395 rack_set_cc_pacing(rack);
23396 rack_log_pacing_delay_calc(rack,
23397 rack->r_ctl.rc_fixed_pacing_rate_ss,
23398 rack->r_ctl.rc_fixed_pacing_rate_ca,
23399 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
23400 __LINE__, NULL,0);
23401 break;
23403 case TCP_RACK_PACE_RATE_SS:
23404 /* Set the fixed pacing rate in Bytes per second for slow start */
23405 RACK_OPTS_INC(tcp_rack_pace_rate_ss);
23406 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) {
23407 error = EPERM;
23408 break;
23410 if (rack->dgp_on) {
23412 * We are already pacing another
23413 * way.
23415 error = EBUSY;
23416 break;
23418 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
23419 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
23420 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
23421 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
23422 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
23423 rack->use_fixed_rate = 1;
23424 if (rack->rack_hibeta)
23425 rack_set_cc_pacing(rack);
23426 rack_log_pacing_delay_calc(rack,
23427 rack->r_ctl.rc_fixed_pacing_rate_ss,
23428 rack->r_ctl.rc_fixed_pacing_rate_ca,
23429 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
23430 __LINE__, NULL, 0);
23431 break;
23433 case TCP_RACK_PACE_RATE_CA:
23434 /* Set the fixed pacing rate in Bytes per second for congestion avoidance */
23435 RACK_OPTS_INC(tcp_rack_pace_rate_ca);
23436 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) {
23437 error = EPERM;
23438 break;
23440 if (rack->dgp_on) {
23442 * We are already pacing another
23443 * way.
23445 error = EBUSY;
23446 break;
23448 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
23449 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
23450 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
23451 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
23452 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
23453 rack->use_fixed_rate = 1;
23454 if (rack->rack_hibeta)
23455 rack_set_cc_pacing(rack);
23456 rack_log_pacing_delay_calc(rack,
23457 rack->r_ctl.rc_fixed_pacing_rate_ss,
23458 rack->r_ctl.rc_fixed_pacing_rate_ca,
23459 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
23460 __LINE__, NULL, 0);
23461 break;
23462 case TCP_RACK_GP_INCREASE_REC:
23463 RACK_OPTS_INC(tcp_gp_inc_rec);
23464 rack->r_ctl.rack_per_of_gp_rec = optval;
23465 rack_log_pacing_delay_calc(rack,
23466 rack->r_ctl.rack_per_of_gp_ss,
23467 rack->r_ctl.rack_per_of_gp_ca,
23468 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
23469 __LINE__, NULL, 0);
23470 break;
23471 case TCP_RACK_GP_INCREASE_CA:
23472 RACK_OPTS_INC(tcp_gp_inc_ca);
23473 ca = optval;
23474 if (ca < 100) {
23476 * We don't allow any reduction
23477 * over the GP b/w.
23479 error = EINVAL;
23480 break;
23482 rack->r_ctl.rack_per_of_gp_ca = ca;
23483 rack_log_pacing_delay_calc(rack,
23484 rack->r_ctl.rack_per_of_gp_ss,
23485 rack->r_ctl.rack_per_of_gp_ca,
23486 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
23487 __LINE__, NULL, 0);
23488 break;
23489 case TCP_RACK_GP_INCREASE_SS:
23490 RACK_OPTS_INC(tcp_gp_inc_ss);
23491 ss = optval;
23492 if (ss < 100) {
23494 * We don't allow any reduction
23495 * over the GP b/w.
23497 error = EINVAL;
23498 break;
23500 rack->r_ctl.rack_per_of_gp_ss = ss;
23501 rack_log_pacing_delay_calc(rack,
23502 rack->r_ctl.rack_per_of_gp_ss,
23503 rack->r_ctl.rack_per_of_gp_ca,
23504 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
23505 __LINE__, NULL, 0);
23506 break;
23507 case TCP_RACK_RR_CONF:
23508 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate);
23509 if (optval && optval <= 3)
23510 rack->r_rr_config = optval;
23511 else
23512 rack->r_rr_config = 0;
23513 break;
23514 case TCP_PACING_DND: /* URL:dnd */
23515 if (optval > 0)
23516 rack->rc_pace_dnd = 1;
23517 else
23518 rack->rc_pace_dnd = 0;
23519 break;
23520 case TCP_HDWR_RATE_CAP:
23521 RACK_OPTS_INC(tcp_hdwr_rate_cap);
23522 if (optval) {
23523 if (rack->r_rack_hw_rate_caps == 0)
23524 rack->r_rack_hw_rate_caps = 1;
23525 else
23526 error = EALREADY;
23527 } else {
23528 rack->r_rack_hw_rate_caps = 0;
23530 break;
23531 case TCP_DGP_UPPER_BOUNDS:
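/* Bit layout: bits 0-7 carry the CA upper bound, bits 16-23 the SS upper bound. */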
23533 uint8_t val;
23534 val = optval & 0x0000ff;
23535 rack->r_ctl.rack_per_upper_bound_ca = val;
23536 val = (optval >> 16) & 0x0000ff;
23537 rack->r_ctl.rack_per_upper_bound_ss = val;
23538 break;
23540 case TCP_SS_EEXIT: /* URL:eexit */
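/*
 * Slow-start early-exit encoding: bits 0-7 set the round threshold
 * (gp_rnd_thresh), bit 16 sets gate_to_fs, bit 17 sets use_gp_not_last,
 * and bits 18-31 (when the decoded value is at least 1000) set
 * gp_gain_req; an optval of 0 disables early exit entirely.
 */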
23541 if (optval > 0) {
23542 rack->r_ctl.gp_rnd_thresh = optval & 0x0ff;
23543 if (optval & 0x10000) {
23544 rack->r_ctl.gate_to_fs = 1;
23545 } else {
23546 rack->r_ctl.gate_to_fs = 0;
23548 if (optval & 0x20000) {
23549 rack->r_ctl.use_gp_not_last = 1;
23550 } else {
23551 rack->r_ctl.use_gp_not_last = 0;
23553 if (optval & 0xfffc0000) {
23554 uint32_t v;
23556 v = (optval >> 18) & 0x00003fff;
23557 if (v >= 1000)
23558 rack->r_ctl.gp_gain_req = v;
23560 } else {
23561 /* We do not do ss early exit at all */
23562 rack->rc_initial_ss_comp = 1;
23563 rack->r_ctl.gp_rnd_thresh = 0;
23565 break;
23566 case TCP_RACK_SPLIT_LIMIT:
23567 RACK_OPTS_INC(tcp_split_limit);
23568 rack->r_ctl.rc_split_limit = optval;
23569 break;
23570 case TCP_BBR_HDWR_PACE:
23571 RACK_OPTS_INC(tcp_hdwr_pacing);
23572 if (optval){
23573 if (rack->rack_hdrw_pacing == 0) {
23574 rack->rack_hdw_pace_ena = 1;
23575 rack->rack_attempt_hdwr_pace = 0;
23576 } else
23577 error = EALREADY;
23578 } else {
23579 rack->rack_hdw_pace_ena = 0;
23580 #ifdef RATELIMIT
23581 if (rack->r_ctl.crte != NULL) {
23582 rack->rack_hdrw_pacing = 0;
23583 rack->rack_attempt_hdwr_pace = 0;
23584 tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
23585 rack->r_ctl.crte = NULL;
23587 #endif
23589 break;
23590 /* End Pacing related ones */
23591 case TCP_RACK_PRR_SENDALOT:
23592 /* Allow PRR to send more than one seg */
23593 RACK_OPTS_INC(tcp_rack_prr_sendalot);
23594 rack->r_ctl.rc_prr_sendalot = optval;
23595 break;
23596 case TCP_RACK_MIN_TO:
23597 /* Minimum time between rack t-o's in ms */
23598 RACK_OPTS_INC(tcp_rack_min_to);
23599 rack->r_ctl.rc_min_to = optval;
23600 break;
23601 case TCP_RACK_EARLY_SEG:
23602 /* If early recovery max segments */
23603 RACK_OPTS_INC(tcp_rack_early_seg);
23604 rack->r_ctl.rc_early_recovery_segs = optval;
23605 break;
23606 case TCP_RACK_ENABLE_HYSTART:
23608 if (optval) {
23609 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
23610 if (rack_do_hystart > RACK_HYSTART_ON)
23611 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
23612 if (rack_do_hystart > RACK_HYSTART_ON_W_SC)
23613 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
23614 } else {
23615 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH);
23618 break;
23619 case TCP_RACK_REORD_THRESH:
23620 /* RACK reorder threshold (shift amount) */
23621 RACK_OPTS_INC(tcp_rack_reord_thresh);
23622 if ((optval > 0) && (optval < 31))
23623 rack->r_ctl.rc_reorder_shift = optval;
23624 else
23625 error = EINVAL;
23626 break;
23627 case TCP_RACK_REORD_FADE:
23628 /* Does reordering fade after ms time */
23629 RACK_OPTS_INC(tcp_rack_reord_fade);
23630 rack->r_ctl.rc_reorder_fade = optval;
23631 break;
23632 case TCP_RACK_TLP_THRESH:
23633 /* RACK TLP threshold i.e. srtt+(srtt/N) */
23634 RACK_OPTS_INC(tcp_rack_tlp_thresh);
23635 if (optval)
23636 rack->r_ctl.rc_tlp_threshold = optval;
23637 else
23638 error = EINVAL;
23639 break;
23640 case TCP_BBR_USE_RACK_RR:
23641 RACK_OPTS_INC(tcp_rack_rr);
23642 if (optval)
23643 rack->use_rack_rr = 1;
23644 else
23645 rack->use_rack_rr = 0;
23646 break;
23647 case TCP_RACK_PKT_DELAY:
23648 /* RACK added ms i.e. rack-rtt + reord + N */
23649 RACK_OPTS_INC(tcp_rack_pkt_delay);
23650 rack->r_ctl.rc_pkt_delay = optval;
23651 break;
23652 case TCP_DELACK:
23653 RACK_OPTS_INC(tcp_rack_delayed_ack);
23654 if (optval == 0)
23655 tp->t_delayed_ack = 0;
23656 else
23657 tp->t_delayed_ack = 1;
23658 if (tp->t_flags & TF_DELACK) {
23659 tp->t_flags &= ~TF_DELACK;
23660 tp->t_flags |= TF_ACKNOW;
23661 NET_EPOCH_ENTER(et);
23662 rack_output(tp);
23663 NET_EPOCH_EXIT(et);
23665 break;
23667 case TCP_BBR_RACK_RTT_USE:
23668 RACK_OPTS_INC(tcp_rack_rtt_use);
23669 if ((optval != USE_RTT_HIGH) &&
23670 (optval != USE_RTT_LOW) &&
23671 (optval != USE_RTT_AVG))
23672 error = EINVAL;
23673 else
23674 rack->r_ctl.rc_rate_sample_method = optval;
23675 break;
23676 case TCP_HONOR_HPTS_MIN:
23677 RACK_OPTS_INC(tcp_honor_hpts);
23678 if (optval) {
23679 rack->r_use_hpts_min = 1;
23681 * Must be between 2 and 80% to be a reduction, else
23682 * we keep the default (10%).
23684 if ((optval > 1) && (optval <= 80)) {
23685 rack->r_ctl.max_reduction = optval;
23687 } else
23688 rack->r_use_hpts_min = 0;
23689 break;
23690 case TCP_REC_IS_DYN: /* URL:dynrec */
23691 RACK_OPTS_INC(tcp_dyn_rec);
23692 if (optval)
23693 rack->rc_gp_no_rec_chg = 1;
23694 else
23695 rack->rc_gp_no_rec_chg = 0;
23696 break;
23697 case TCP_NO_TIMELY:
23698 RACK_OPTS_INC(tcp_notimely);
23699 if (optval) {
23700 rack->rc_skip_timely = 1;
23701 rack->r_ctl.rack_per_of_gp_rec = 90;
23702 rack->r_ctl.rack_per_of_gp_ca = 100;
23703 rack->r_ctl.rack_per_of_gp_ss = 250;
23704 } else {
23705 rack->rc_skip_timely = 0;
23707 break;
23708 case TCP_GP_USE_LTBW:
23709 if (optval == 0) {
23710 rack->use_lesser_lt_bw = 0;
23711 rack->dis_lt_bw = 1;
23712 } else if (optval == 1) {
23713 rack->use_lesser_lt_bw = 1;
23714 rack->dis_lt_bw = 0;
23715 } else if (optval == 2) {
23716 rack->use_lesser_lt_bw = 0;
23717 rack->dis_lt_bw = 0;
23719 break;
23720 case TCP_DATA_AFTER_CLOSE:
23721 RACK_OPTS_INC(tcp_data_after_close);
23722 if (optval)
23723 rack->rc_allow_data_af_clo = 1;
23724 else
23725 rack->rc_allow_data_af_clo = 0;
23726 break;
23727 default:
23728 break;
23730 tcp_log_socket_option(tp, sopt_name, optval, error);
23731 return (error);
23734 static void
23735 rack_inherit(struct tcpcb *tp, struct inpcb *parent)
23738 * A new connection has been created (tp) and
23739 * the parent is the inpcb given. We want to
23740 * apply a read-lock to the parent (we are already
23741 * holding a write lock on the tp) and copy anything
23742 * out of the rack specific data as long as its tfb is
23743 * the same as ours i.e. we are the same stack. Otherwise
23744 * we just return.
23746 struct tcpcb *par;
23747 struct tcp_rack *dest, *src;
23748 int cnt = 0;
23750 par = intotcpcb(parent);
23751 if (par->t_fb != tp->t_fb) {
23752 /* Not the same stack */
23753 tcp_log_socket_option(tp, 0, 0, 1);
23754 return;
23756 /* Ok, if we reach here let's set up the two rack pointers */
23757 dest = (struct tcp_rack *)tp->t_fb_ptr;
23758 src = (struct tcp_rack *)par->t_fb_ptr;
23759 if ((src == NULL) || (dest == NULL)) {
23760 /* Huh? */
23761 tcp_log_socket_option(tp, 0, 0, 2);
23762 return;
23764 /* Now copy out anything we wish to inherit i.e. things in socket-options */
23765 /* TCP_RACK_PROFILE we can't know but we can set DGP if its on */
23766 if ((src->dgp_on) && (dest->dgp_on == 0)) {
23767 /* Profile 1 had to be set via sock opt */
23768 rack_set_dgp(dest);
23769 cnt++;
23771 /* TCP_RACK_SET_RXT_OPTIONS */
23772 if (dest->full_size_rxt != src->full_size_rxt) {
23773 dest->full_size_rxt = src->full_size_rxt;
23774 cnt++;
23776 if (dest->shape_rxt_to_pacing_min != src->shape_rxt_to_pacing_min) {
23777 dest->shape_rxt_to_pacing_min = src->shape_rxt_to_pacing_min;
23778 cnt++;
23780 /* TCP_RACK_DSACK_OPT */
23781 if (dest->rc_rack_tmr_std_based != src->rc_rack_tmr_std_based) {
23782 dest->rc_rack_tmr_std_based = src->rc_rack_tmr_std_based;
23783 cnt++;
23785 if (dest->rc_rack_use_dsack != src->rc_rack_use_dsack) {
23786 dest->rc_rack_use_dsack = src->rc_rack_use_dsack;
23787 cnt++;
23789 /* TCP_RACK_PACING_DIVISOR */
23790 if (dest->r_ctl.pace_len_divisor != src->r_ctl.pace_len_divisor) {
23791 dest->r_ctl.pace_len_divisor = src->r_ctl.pace_len_divisor;
23792 cnt++;
23794 /* TCP_RACK_HI_BETA */
23795 if (src->rack_hibeta != dest->rack_hibeta) {
23796 cnt++;
23797 if (src->rack_hibeta) {
23798 dest->r_ctl.rc_saved_beta.beta = src->r_ctl.rc_saved_beta.beta;
23799 dest->rack_hibeta = 1;
23800 } else {
23801 dest->rack_hibeta = 0;
23804 /* TCP_RACK_TIMER_SLOP */
23805 if (dest->r_ctl.timer_slop != src->r_ctl.timer_slop) {
23806 dest->r_ctl.timer_slop = src->r_ctl.timer_slop;
23807 cnt++;
23809 /* TCP_RACK_PACING_BETA_ECN */
23810 if (dest->r_ctl.rc_saved_beta.beta_ecn != src->r_ctl.rc_saved_beta.beta_ecn) {
23811 dest->r_ctl.rc_saved_beta.beta_ecn = src->r_ctl.rc_saved_beta.beta_ecn;
23812 cnt++;
23814 if (dest->r_ctl.rc_saved_beta.newreno_flags != src->r_ctl.rc_saved_beta.newreno_flags) {
23815 dest->r_ctl.rc_saved_beta.newreno_flags = src->r_ctl.rc_saved_beta.newreno_flags;
23816 cnt++;
23818 /* We do not do TCP_DEFER_OPTIONS */
23819 /* TCP_RACK_MEASURE_CNT */
23820 if (dest->r_ctl.req_measurements != src->r_ctl.req_measurements) {
23821 dest->r_ctl.req_measurements = src->r_ctl.req_measurements;
23822 cnt++;
23824 /* TCP_HDWR_UP_ONLY */
23825 if (dest->r_up_only != src->r_up_only) {
23826 dest->r_up_only = src->r_up_only;
23827 cnt++;
23829 /* TCP_FILLCW_RATE_CAP */
23830 if (dest->r_ctl.fillcw_cap != src->r_ctl.fillcw_cap) {
23831 dest->r_ctl.fillcw_cap = src->r_ctl.fillcw_cap;
23832 cnt++;
23834 /* TCP_PACING_RATE_CAP */
23835 if (dest->r_ctl.bw_rate_cap != src->r_ctl.bw_rate_cap) {
23836 dest->r_ctl.bw_rate_cap = src->r_ctl.bw_rate_cap;
23837 cnt++;
23839 /* A listener can't set TCP_HYBRID_PACING */
23840 /* TCP_SIDECHAN_DIS */
23841 if (dest->r_ctl.side_chan_dis_mask != src->r_ctl.side_chan_dis_mask) {
23842 dest->r_ctl.side_chan_dis_mask = src->r_ctl.side_chan_dis_mask;
23843 cnt++;
23845 /* TCP_SHARED_CWND_TIME_LIMIT */
23846 if (dest->r_limit_scw != src->r_limit_scw) {
23847 dest->r_limit_scw = src->r_limit_scw;
23848 cnt++;
23850 /* TCP_RACK_PACE_TO_FILL */
23851 if (dest->rc_pace_to_cwnd != src->rc_pace_to_cwnd) {
23852 dest->rc_pace_to_cwnd = src->rc_pace_to_cwnd;
23853 cnt++;
23855 if (dest->rc_pace_fill_if_rttin_range != src->rc_pace_fill_if_rttin_range) {
23856 dest->rc_pace_fill_if_rttin_range = src->rc_pace_fill_if_rttin_range;
23857 cnt++;
23859 if (dest->rtt_limit_mul != src->rtt_limit_mul) {
23860 dest->rtt_limit_mul = src->rtt_limit_mul;
23861 cnt++;
23863 /* TCP_RACK_NO_PUSH_AT_MAX */
23864 if (dest->r_ctl.rc_no_push_at_mrtt != src->r_ctl.rc_no_push_at_mrtt) {
23865 dest->r_ctl.rc_no_push_at_mrtt = src->r_ctl.rc_no_push_at_mrtt;
23866 cnt++;
23868 /* TCP_SHARED_CWND_ENABLE */
23869 if (dest->rack_enable_scwnd != src->rack_enable_scwnd) {
23870 dest->rack_enable_scwnd = src->rack_enable_scwnd;
23871 cnt++;
23873 /* TCP_USE_CMP_ACKS */
23874 if (dest->r_use_cmp_ack != src->r_use_cmp_ack) {
23875 dest->r_use_cmp_ack = src->r_use_cmp_ack;
23876 cnt++;
23879 if (dest->r_mbuf_queue != src->r_mbuf_queue) {
23880 dest->r_mbuf_queue = src->r_mbuf_queue;
23881 cnt++;
23883 /* TCP_RACK_MBUF_QUEUE */
23884 if (dest->r_mbuf_queue != src->r_mbuf_queue) {
23885 dest->r_mbuf_queue = src->r_mbuf_queue;
23886 cnt++;
23888 if (dest->r_mbuf_queue || dest->rc_always_pace || dest->r_use_cmp_ack) {
23889 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
23890 } else {
23891 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
23893 if (dest->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) {
23894 tp->t_flags2 |= TF2_MBUF_ACKCMP;
23896 /* TCP_RACK_NONRXT_CFG_RATE */
23897 if (dest->rack_rec_nonrxt_use_cr != src->rack_rec_nonrxt_use_cr) {
23898 dest->rack_rec_nonrxt_use_cr = src->rack_rec_nonrxt_use_cr;
23899 cnt++;
23901 /* TCP_NO_PRR */
23902 if (dest->rack_no_prr != src->rack_no_prr) {
23903 dest->rack_no_prr = src->rack_no_prr;
23904 cnt++;
23906 if (dest->no_prr_addback != src->no_prr_addback) {
23907 dest->no_prr_addback = src->no_prr_addback;
23908 cnt++;
23910 /* RACK_CSPR_IS_FCC */
23911 if (dest->cspr_is_fcc != src->cspr_is_fcc) {
23912 dest->cspr_is_fcc = src->cspr_is_fcc;
23913 cnt++;
23915 /* TCP_TIMELY_DYN_ADJ */
23916 if (dest->rc_gp_dyn_mul != src->rc_gp_dyn_mul) {
23917 dest->rc_gp_dyn_mul = src->rc_gp_dyn_mul;
23918 cnt++;
23920 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) {
23921 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca;
23922 cnt++;
23924 /* TCP_RACK_TLP_USE */
23925 if (dest->rack_tlp_threshold_use != src->rack_tlp_threshold_use) {
23926 dest->rack_tlp_threshold_use = src->rack_tlp_threshold_use;
23927 cnt++;
23929 /* we don't allow inheritance of TCP_RACK_PACE_ALWAYS */
23930 /* TCP_BBR_RACK_INIT_RATE */
23931 if (dest->r_ctl.init_rate != src->r_ctl.init_rate) {
23932 dest->r_ctl.init_rate = src->r_ctl.init_rate;
23933 cnt++;
23935 /* TCP_RACK_FORCE_MSEG */
23936 if (dest->rc_force_max_seg != src->rc_force_max_seg) {
23937 dest->rc_force_max_seg = src->rc_force_max_seg;
23938 cnt++;
23940 /* TCP_RACK_PACE_MIN_SEG */
23941 if (dest->r_ctl.rc_user_set_min_segs != src->r_ctl.rc_user_set_min_segs) {
23942 dest->r_ctl.rc_user_set_min_segs = src->r_ctl.rc_user_set_min_segs;
23943 cnt++;
23945 /* we don't allow TCP_RACK_PACE_MAX_SEG */
23946 /* TCP_RACK_PACE_RATE_REC, TCP_RACK_PACE_RATE_SS, TCP_RACK_PACE_RATE_CA */
23947 if (dest->r_ctl.rc_fixed_pacing_rate_ca != src->r_ctl.rc_fixed_pacing_rate_ca) {
23948 dest->r_ctl.rc_fixed_pacing_rate_ca = src->r_ctl.rc_fixed_pacing_rate_ca;
23949 cnt++;
23951 if (dest->r_ctl.rc_fixed_pacing_rate_ss != src->r_ctl.rc_fixed_pacing_rate_ss) {
23952 dest->r_ctl.rc_fixed_pacing_rate_ss = src->r_ctl.rc_fixed_pacing_rate_ss;
23953 cnt++;
23955 if (dest->r_ctl.rc_fixed_pacing_rate_rec != src->r_ctl.rc_fixed_pacing_rate_rec) {
23956 dest->r_ctl.rc_fixed_pacing_rate_rec = src->r_ctl.rc_fixed_pacing_rate_rec;
23957 cnt++;
23959 /* TCP_RACK_GP_INCREASE_REC, TCP_RACK_GP_INCREASE_CA, TCP_RACK_GP_INCREASE_SS */
23960 if (dest->r_ctl.rack_per_of_gp_rec != src->r_ctl.rack_per_of_gp_rec) {
23961 dest->r_ctl.rack_per_of_gp_rec = src->r_ctl.rack_per_of_gp_rec;
23962 cnt++;
23964 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) {
23965 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca;
23966 cnt++;
23969 if (dest->r_ctl.rack_per_of_gp_ss != src->r_ctl.rack_per_of_gp_ss) {
23970 dest->r_ctl.rack_per_of_gp_ss = src->r_ctl.rack_per_of_gp_ss;
23971 cnt++;
23973 /* TCP_RACK_RR_CONF */
23974 if (dest->r_rr_config != src->r_rr_config) {
23975 dest->r_rr_config = src->r_rr_config;
23976 cnt++;
23978 /* TCP_PACING_DND */
23979 if (dest->rc_pace_dnd != src->rc_pace_dnd) {
23980 dest->rc_pace_dnd = src->rc_pace_dnd;
23981 cnt++;
23983 /* TCP_HDWR_RATE_CAP */
23984 if (dest->r_rack_hw_rate_caps != src->r_rack_hw_rate_caps) {
23985 dest->r_rack_hw_rate_caps = src->r_rack_hw_rate_caps;
23986 cnt++;
23988 /* TCP_DGP_UPPER_BOUNDS */
23989 if (dest->r_ctl.rack_per_upper_bound_ca != src->r_ctl.rack_per_upper_bound_ca) {
23990 dest->r_ctl.rack_per_upper_bound_ca = src->r_ctl.rack_per_upper_bound_ca;
23991 cnt++;
23993 if (dest->r_ctl.rack_per_upper_bound_ss != src->r_ctl.rack_per_upper_bound_ss) {
23994 dest->r_ctl.rack_per_upper_bound_ss = src->r_ctl.rack_per_upper_bound_ss;
23995 cnt++;
23997 /* TCP_SS_EEXIT */
23998 if (dest->r_ctl.gp_rnd_thresh != src->r_ctl.gp_rnd_thresh) {
23999 dest->r_ctl.gp_rnd_thresh = src->r_ctl.gp_rnd_thresh;
24000 cnt++;
24002 if (dest->r_ctl.gate_to_fs != src->r_ctl.gate_to_fs) {
24003 dest->r_ctl.gate_to_fs = src->r_ctl.gate_to_fs;
24004 cnt++;
24006 if (dest->r_ctl.use_gp_not_last != src->r_ctl.use_gp_not_last) {
24007 dest->r_ctl.use_gp_not_last = src->r_ctl.use_gp_not_last;
24008 cnt++;
24010 if (dest->r_ctl.gp_gain_req != src->r_ctl.gp_gain_req) {
24011 dest->r_ctl.gp_gain_req = src->r_ctl.gp_gain_req;
24012 cnt++;
24014 /* TCP_BBR_HDWR_PACE */
24015 if (dest->rack_hdw_pace_ena != src->rack_hdw_pace_ena) {
24016 dest->rack_hdw_pace_ena = src->rack_hdw_pace_ena;
24017 cnt++;
24019 if (dest->rack_attempt_hdwr_pace != src->rack_attempt_hdwr_pace) {
24020 dest->rack_attempt_hdwr_pace = src->rack_attempt_hdwr_pace;
24021 cnt++;
24023 /* TCP_RACK_PRR_SENDALOT */
24024 if (dest->r_ctl.rc_prr_sendalot != src->r_ctl.rc_prr_sendalot) {
24025 dest->r_ctl.rc_prr_sendalot = src->r_ctl.rc_prr_sendalot;
24026 cnt++;
24028 /* TCP_RACK_MIN_TO */
24029 if (dest->r_ctl.rc_min_to != src->r_ctl.rc_min_to) {
24030 dest->r_ctl.rc_min_to = src->r_ctl.rc_min_to;
24031 cnt++;
24033 /* TCP_RACK_EARLY_SEG */
24034 if (dest->r_ctl.rc_early_recovery_segs != src->r_ctl.rc_early_recovery_segs) {
24035 dest->r_ctl.rc_early_recovery_segs = src->r_ctl.rc_early_recovery_segs;
24036 cnt++;
24038 /* TCP_RACK_ENABLE_HYSTART */
24039 if (par->t_ccv.flags != tp->t_ccv.flags) {
24040 cnt++;
24041 if (par->t_ccv.flags & CCF_HYSTART_ALLOWED) {
24042 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
24043 if (rack_do_hystart > RACK_HYSTART_ON)
24044 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
24045 if (rack_do_hystart > RACK_HYSTART_ON_W_SC)
24046 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
24047 } else {
24048 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH);
24049 }
24050 }
24051 /* TCP_RACK_REORD_THRESH */
24052 if (dest->r_ctl.rc_reorder_shift != src->r_ctl.rc_reorder_shift) {
24053 dest->r_ctl.rc_reorder_shift = src->r_ctl.rc_reorder_shift;
24054 cnt++;
24056 /* TCP_RACK_REORD_FADE */
24057 if (dest->r_ctl.rc_reorder_fade != src->r_ctl.rc_reorder_fade) {
24058 dest->r_ctl.rc_reorder_fade = src->r_ctl.rc_reorder_fade;
24059 cnt++;
24061 /* TCP_RACK_TLP_THRESH */
24062 if (dest->r_ctl.rc_tlp_threshold != src->r_ctl.rc_tlp_threshold) {
24063 dest->r_ctl.rc_tlp_threshold = src->r_ctl.rc_tlp_threshold;
24064 cnt++;
24066 /* TCP_BBR_USE_RACK_RR */
24067 if (dest->use_rack_rr != src->use_rack_rr) {
24068 dest->use_rack_rr = src->use_rack_rr;
24069 cnt++;
24071 /* TCP_RACK_PKT_DELAY */
24072 if (dest->r_ctl.rc_pkt_delay != src->r_ctl.rc_pkt_delay) {
24073 dest->r_ctl.rc_pkt_delay = src->r_ctl.rc_pkt_delay;
24074 cnt++;
24076 /* TCP_DELACK will get copied via the main code if applicable */
24077 /* TCP_BBR_RACK_RTT_USE */
24078 if (dest->r_ctl.rc_rate_sample_method != src->r_ctl.rc_rate_sample_method) {
24079 dest->r_ctl.rc_rate_sample_method = src->r_ctl.rc_rate_sample_method;
24080 cnt++;
24082 /* TCP_HONOR_HPTS_MIN */
24083 if (dest->r_use_hpts_min != src->r_use_hpts_min) {
24084 dest->r_use_hpts_min = src->r_use_hpts_min;
24085 cnt++;
24087 if (dest->r_ctl.max_reduction != src->r_ctl.max_reduction) {
24088 dest->r_ctl.max_reduction = src->r_ctl.max_reduction;
24089 cnt++;
24091 /* TCP_REC_IS_DYN */
24092 if (dest->rc_gp_no_rec_chg != src->rc_gp_no_rec_chg) {
24093 dest->rc_gp_no_rec_chg = src->rc_gp_no_rec_chg;
24094 cnt++;
24096 if (dest->rc_skip_timely != src->rc_skip_timely) {
24097 dest->rc_skip_timely = src->rc_skip_timely;
24098 cnt++;
24100 /* TCP_DATA_AFTER_CLOSE */
24101 if (dest->rc_allow_data_af_clo != src->rc_allow_data_af_clo) {
24102 dest->rc_allow_data_af_clo = src->rc_allow_data_af_clo;
24103 cnt++;
24105 /* TCP_GP_USE_LTBW */
24106 if (src->use_lesser_lt_bw != dest->use_lesser_lt_bw) {
24107 dest->use_lesser_lt_bw = src->use_lesser_lt_bw;
24108 cnt++;
24110 if (dest->dis_lt_bw != src->dis_lt_bw) {
24111 dest->dis_lt_bw = src->dis_lt_bw;
24112 cnt++;
24114 tcp_log_socket_option(tp, 0, cnt, 0);
24115 }
24118 static void
24119 rack_apply_deferred_options(struct tcp_rack *rack)
24120 {
24121 struct deferred_opt_list *dol, *sdol;
24122 uint32_t s_optval;
24124 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
24125 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
24126 /* Disadvantage of deferral is you lose the error return */
24127 s_optval = (uint32_t)dol->optval;
24128 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL);
24129 free(dol, M_TCPDO);
24130 }
24131 }
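/*
 * Illustrative (editorial note, not part of the stack): how an application
 * might exercise the deferral path above.  With TCP_DEFER_OPTIONS enabled,
 * options set before the first goodput measurement are queued and replayed
 * by rack_apply_deferred_options(), so any per-option error is silently
 * lost.  A minimal user-space sketch, assuming the option names from
 * <netinet/tcp.h> are available on the running system and "fd" is a
 * connected TCP socket:
 *
 *	int on = 1, always_pace = 1;
 *	(void)setsockopt(fd, IPPROTO_TCP, TCP_DEFER_OPTIONS, &on, sizeof(on));
 *	// Deferred: a failure here is not reported back to the caller.
 *	(void)setsockopt(fd, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS,
 *	    &always_pace, sizeof(always_pace));
 */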
24133 static void
24134 rack_hw_tls_change(struct tcpcb *tp, int chg)
24135 {
24136 /* Update HW tls state */
24137 struct tcp_rack *rack;
24139 rack = (struct tcp_rack *)tp->t_fb_ptr;
24140 if (chg)
24141 rack->r_ctl.fsb.hw_tls = 1;
24142 else
24143 rack->r_ctl.fsb.hw_tls = 0;
24144 }
24146 static int
24147 rack_pru_options(struct tcpcb *tp, int flags)
24148 {
24149 if (flags & PRUS_OOB)
24150 return (EOPNOTSUPP);
24151 return (0);
24152 }
24154 static bool
24155 rack_wake_check(struct tcpcb *tp)
24156 {
24157 struct tcp_rack *rack;
24158 struct timeval tv;
24159 uint32_t cts;
24161 rack = (struct tcp_rack *)tp->t_fb_ptr;
24162 if (rack->r_ctl.rc_hpts_flags) {
24163 cts = tcp_get_usecs(&tv);
24164 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT) {
24165 /*
24166 * Pacing timer is up, check if we are ready.
24167 */
24168 if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to))
24169 return (true);
24170 } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) {
24171 /*
24172 * A timer is up, check if we are ready.
24173 */
24174 if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp))
24175 return (true);
24176 }
24177 }
24178 return (false);
24179 }
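/*
 * Illustrative note (editorial, not part of the stack): TSTMP_GEQ() from
 * tcp_seq.h performs a wrap-safe comparison of 32-bit microsecond
 * timestamps by casting the difference to a signed value, so the checks
 * above keep working when the usec counter rolls over.  A minimal sketch
 * of the idea:
 *
 *	// true when a is at or after b, modulo 2^32
 *	static inline bool tstmp_geq_sketch(uint32_t a, uint32_t b)
 *	{
 *		return ((int32_t)(a - b) >= 0);
 *	}
 */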
24181 static struct tcp_function_block __tcp_rack = {
24182 .tfb_tcp_block_name = __XSTRING(STACKNAME),
24183 .tfb_tcp_output = rack_output,
24184 .tfb_do_queued_segments = ctf_do_queued_segments,
24185 .tfb_do_segment_nounlock = rack_do_segment_nounlock,
24186 .tfb_tcp_do_segment = rack_do_segment,
24187 .tfb_tcp_ctloutput = rack_ctloutput,
24188 .tfb_tcp_fb_init = rack_init,
24189 .tfb_tcp_fb_fini = rack_fini,
24190 .tfb_tcp_timer_stop_all = rack_stopall,
24191 .tfb_tcp_rexmit_tmr = rack_remxt_tmr,
24192 .tfb_tcp_handoff_ok = rack_handoff_ok,
24193 .tfb_tcp_mtu_chg = rack_mtu_change,
24194 .tfb_pru_options = rack_pru_options,
24195 .tfb_hwtls_change = rack_hw_tls_change,
24196 .tfb_chg_query = rack_chg_query,
24197 .tfb_switch_failed = rack_switch_failed,
24198 .tfb_early_wake_check = rack_wake_check,
24199 .tfb_compute_pipe = rack_compute_pipe,
24200 .tfb_stack_info = rack_stack_information,
24201 .tfb_inherit = rack_inherit,
24202 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP | TCP_FUNC_DEFAULT_OK,
24203 };
24206 /*
24207 * rack_ctloutput() must drop the inpcb lock before performing copyin on
24208 * socket option arguments. When it re-acquires the lock after the copy, it
24209 * has to revalidate that the connection is still valid for the socket
24210 * option.
24211 */
24212 static int
24213 rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt)
24214 {
24215 struct inpcb *inp = tptoinpcb(tp);
24216 #ifdef INET
24217 struct ip *ip;
24218 #endif
24219 struct tcp_rack *rack;
24220 struct tcp_hybrid_req hybrid;
24221 uint64_t loptval;
24222 int32_t error = 0, optval;
24224 rack = (struct tcp_rack *)tp->t_fb_ptr;
24225 if (rack == NULL) {
24226 INP_WUNLOCK(inp);
24227 return (EINVAL);
24228 }
24229 #ifdef INET
24230 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
24231 #endif
24233 switch (sopt->sopt_level) {
24234 #ifdef INET6
24235 case IPPROTO_IPV6:
24236 MPASS(inp->inp_vflag & INP_IPV6PROTO);
24237 switch (sopt->sopt_name) {
24238 case IPV6_USE_MIN_MTU:
24239 tcp6_use_min_mtu(tp);
24240 break;
24241 }
24242 INP_WUNLOCK(inp);
24243 return (0);
24244 #endif
24245 #ifdef INET
24246 case IPPROTO_IP:
24247 switch (sopt->sopt_name) {
24248 case IP_TOS:
24249 /*
24250 * The DSCP codepoint has changed, update the fsb.
24251 */
24252 ip->ip_tos = rack->rc_inp->inp_ip_tos;
24253 break;
24254 case IP_TTL:
24255 /*
24256 * The TTL has changed, update the fsb.
24257 */
24258 ip->ip_ttl = rack->rc_inp->inp_ip_ttl;
24259 break;
24260 }
24261 INP_WUNLOCK(inp);
24262 return (0);
24263 #endif
24264 #ifdef SO_PEERPRIO
24265 case SOL_SOCKET:
24266 switch (sopt->sopt_name) {
24267 case SO_PEERPRIO: /* SC-URL:bs */
24268 /* Already read in and sanity checked in sosetopt(). */
24269 if (inp->inp_socket) {
24270 rack->client_bufferlvl = inp->inp_socket->so_peerprio;
24271 }
24272 break;
24273 }
24274 INP_WUNLOCK(inp);
24275 return (0);
24276 #endif
24277 case IPPROTO_TCP:
24278 switch (sopt->sopt_name) {
24279 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */
24280 /* Pacing related ones */
24281 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */
24282 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */
24283 case TCP_RACK_PACE_MIN_SEG: /* URL:pace_min_seg */
24284 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */
24285 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */
24286 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */
24287 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss */
24288 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */
24289 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */
24290 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */
24291 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */
24292 case TCP_RACK_RR_CONF: /* URL:rrr_conf */
24293 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */
24294 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */
24295 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */
24296 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */
24297 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */
24298 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */
24299 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */
24300 /* End pacing related */
24301 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */
24302 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */
24303 case TCP_RACK_MIN_TO: /* URL:min_to */
24304 case TCP_RACK_EARLY_SEG: /* URL:early_seg */
24305 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */
24306 case TCP_RACK_REORD_FADE: /* URL:reord_fade */
24307 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */
24308 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */
24309 case TCP_RACK_TLP_USE: /* URL:tlp_use */
24310 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */
24311 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */
24312 case TCP_NO_PRR: /* URL:noprr */
24313 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */
24314 case TCP_DATA_AFTER_CLOSE: /* no URL */
24315 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */
24316 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */
24317 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */
24318 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */
24319 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */
24320 case TCP_RACK_PROFILE: /* URL:profile */
24321 case TCP_SIDECHAN_DIS: /* URL:scodm */
24322 case TCP_HYBRID_PACING: /* URL:pacing=hybrid */
24323 case TCP_USE_CMP_ACKS: /* URL:cmpack */
24324 case TCP_RACK_ABC_VAL: /* URL:labc */
24325 case TCP_REC_ABC_VAL: /* URL:reclabc */
24326 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */
24327 case TCP_DEFER_OPTIONS: /* URL:defer */
24328 case TCP_RACK_DSACK_OPT: /* URL:dsack */
24329 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */
24330 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */
24331 case TCP_RACK_SET_RXT_OPTIONS: /* URL:rxtsz */
24332 case TCP_RACK_HI_BETA: /* URL:hibeta */
24333 case TCP_RACK_SPLIT_LIMIT: /* URL:split */
24334 case TCP_SS_EEXIT: /* URL:eexit */
24335 case TCP_DGP_UPPER_BOUNDS: /* URL:upper */
24336 case TCP_RACK_PACING_DIVISOR: /* URL:divisor */
24337 case TCP_PACING_DND: /* URL:dnd */
24338 case TCP_NO_TIMELY: /* URL:notimely */
24339 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */
24340 case TCP_HONOR_HPTS_MIN: /* URL:hptsmin */
24341 case TCP_REC_IS_DYN: /* URL:dynrec */
24342 case TCP_GP_USE_LTBW: /* URL:useltbw */
24343 goto process_opt;
24344 break;
24345 default:
24346 /* Filter off all unknown options to the base stack */
24347 return (tcp_default_ctloutput(tp, sopt));
24348 break;
24349 }
24350 default:
24351 INP_WUNLOCK(inp);
24352 return (0);
24353 }
24354 process_opt:
24355 INP_WUNLOCK(inp);
24356 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) ||
24357 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) {
24358 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval));
24359 /*
24360 * We truncate it down to 32 bits for the socket-option trace; this
24361 * means rates > 34Gbps won't show right, but that's probably ok.
24362 */
24363 optval = (uint32_t)loptval;
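/*
 * Illustrative arithmetic (editorial note, assuming the caps are carried
 * in bytes per second): the largest value that survives the 32-bit
 * truncation used for the trace is
 *
 *	(2^32 - 1) bytes/sec * 8 bits/byte ~= 34.36 Gbps
 *
 * which is where the "rates > 34Gbps" figure above comes from; the full
 * 64-bit value in loptval is unaffected.
 */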
24364 } else if (sopt->sopt_name == TCP_HYBRID_PACING) {
24365 error = sooptcopyin(sopt, &hybrid, sizeof(hybrid), sizeof(hybrid));
24366 } else {
24367 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
24368 /* Save it in 64 bit form too */
24369 loptval = optval;
24370 }
24371 if (error)
24372 return (error);
24373 INP_WLOCK(inp);
24374 if (tp->t_fb != &__tcp_rack) {
24375 INP_WUNLOCK(inp);
24376 return (ENOPROTOOPT);
24377 }
24378 if (rack->defer_options && (rack->gp_ready == 0) &&
24379 (sopt->sopt_name != TCP_DEFER_OPTIONS) &&
24380 (sopt->sopt_name != TCP_HYBRID_PACING) &&
24381 (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) &&
24382 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) &&
24383 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) {
24384 /* Options are being deferred */
24385 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) {
24386 INP_WUNLOCK(inp);
24387 return (0);
24388 } else {
24389 /* No memory to defer, fail */
24390 INP_WUNLOCK(inp);
24391 return (ENOMEM);
24392 }
24393 }
24394 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid);
24395 INP_WUNLOCK(inp);
24396 return (error);
24397 }
24399 static void
24400 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
24401 {
24403 INP_WLOCK_ASSERT(tptoinpcb(tp));
24404 bzero(ti, sizeof(*ti));
24406 ti->tcpi_state = tp->t_state;
24407 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
24408 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
24409 if (tp->t_flags & TF_SACK_PERMIT)
24410 ti->tcpi_options |= TCPI_OPT_SACK;
24411 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
24412 ti->tcpi_options |= TCPI_OPT_WSCALE;
24413 ti->tcpi_snd_wscale = tp->snd_scale;
24414 ti->tcpi_rcv_wscale = tp->rcv_scale;
24415 }
24416 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))
24417 ti->tcpi_options |= TCPI_OPT_ECN;
24418 if (tp->t_flags & TF_FASTOPEN)
24419 ti->tcpi_options |= TCPI_OPT_TFO;
24420 /* still kept in ticks is t_rcvtime */
24421 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
24422 /* Since we hold everything in precise useconds this is easy */
24423 ti->tcpi_rtt = tp->t_srtt;
24424 ti->tcpi_rttvar = tp->t_rttvar;
24425 ti->tcpi_rto = tp->t_rxtcur;
24426 ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
24427 ti->tcpi_snd_cwnd = tp->snd_cwnd;
24428 /*
24429 * FreeBSD-specific extension fields for tcp_info.
24430 */
24431 ti->tcpi_rcv_space = tp->rcv_wnd;
24432 ti->tcpi_rcv_nxt = tp->rcv_nxt;
24433 ti->tcpi_snd_wnd = tp->snd_wnd;
24434 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. */
24435 ti->tcpi_snd_nxt = tp->snd_nxt;
24436 ti->tcpi_snd_mss = tp->t_maxseg;
24437 ti->tcpi_rcv_mss = tp->t_maxseg;
24438 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
24439 ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
24440 ti->tcpi_snd_zerowin = tp->t_sndzerowin;
24441 ti->tcpi_total_tlp = tp->t_sndtlppack;
24442 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
24443 ti->tcpi_rttmin = tp->t_rttlow;
24444 #ifdef NETFLIX_STATS
24445 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
24446 #endif
24447 #ifdef TCP_OFFLOAD
24448 if (tp->t_flags & TF_TOE) {
24449 ti->tcpi_options |= TCPI_OPT_TOE;
24450 tcp_offload_tcp_info(tp, ti);
24451 }
24452 #endif
24453 }
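/*
 * Illustrative (editorial note, not part of the stack): rack_fill_info()
 * backs the standard TCP_INFO query, so the user-space counterpart of this
 * code is simply a getsockopt() call.  A minimal sketch, with "fd" a
 * connected TCP socket and error handling/remaining includes omitted:
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("srtt %u us, cwnd %u bytes\n",
 *		    ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 *
 * As noted above, the rtt fields are already in precise microseconds.
 */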
24455 static int
24456 rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt)
24457 {
24458 struct inpcb *inp = tptoinpcb(tp);
24459 struct tcp_rack *rack;
24460 int32_t error, optval;
24461 uint64_t val, loptval;
24462 struct tcp_info ti;
24463 /*
24464 * Because all our options are either boolean or an int, we can just
24465 * pull everything into optval and then unlock and copy. If we ever
24466 * add an option that is not an int, then this will have quite an
24467 * impact on this routine.
24468 */
24469 error = 0;
24470 rack = (struct tcp_rack *)tp->t_fb_ptr;
24471 if (rack == NULL) {
24472 INP_WUNLOCK(inp);
24473 return (EINVAL);
24474 }
24475 switch (sopt->sopt_name) {
24476 case TCP_INFO:
24477 /* First get the info filled */
24478 rack_fill_info(tp, &ti);
24479 /* Fix up the rtt related fields if needed */
24480 INP_WUNLOCK(inp);
24481 error = sooptcopyout(sopt, &ti, sizeof ti);
24482 return (error);
24483 /*
24484 * Beta is the congestion control value for NewReno that influences how
24485 * much of a backoff happens when loss is detected. It is normally set
24486 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value
24487 * when you exit recovery.
24488 */
24489 case TCP_RACK_PACING_BETA:
24490 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
24491 error = EINVAL;
24492 else if (rack->rc_pacing_cc_set == 0)
24493 optval = rack->r_ctl.rc_saved_beta.beta;
24494 else {
24495 /*
24496 * Reach out into the CC data and report back what
24497 * I have previously set. Yeah it looks hackish but
24498 * we don't want to report the saved values.
24499 */
24500 if (tp->t_ccv.cc_data)
24501 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta;
24502 else
24503 error = EINVAL;
24504 }
24505 break;
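/*
 * Illustrative arithmetic (editorial note): with the default beta of 50
 * described above, a loss-triggered exit from recovery on a 100-segment
 * cwnd would leave roughly
 *
 *	cwnd = 100 * 50 / 100 = 50 segments
 *
 * matching the "reduced to 50% of its previous value" description.
 */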
24506 /*
24507 * Beta_ecn is the congestion control value for NewReno that influences how
24508 * much of a backoff happens when an ECN mark is detected. It is normally set
24509 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when
24510 * you exit recovery. Note that classic ECN has a beta of 50; it is only
24511 * ABE ECN that uses this "less" value, but we do too with pacing :)
24512 */
24513 case TCP_RACK_PACING_BETA_ECN:
24514 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
24515 error = EINVAL;
24516 else if (rack->rc_pacing_cc_set == 0)
24517 optval = rack->r_ctl.rc_saved_beta.beta_ecn;
24518 else {
24519 /*
24520 * Reach out into the CC data and report back what
24521 * I have previously set. Yeah it looks hackish but
24522 * we don't want to report the saved values.
24523 */
24524 if (tp->t_ccv.cc_data)
24525 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn;
24526 else
24527 error = EINVAL;
24528 }
24529 break;
24530 case TCP_RACK_DSACK_OPT:
24531 optval = 0;
24532 if (rack->rc_rack_tmr_std_based) {
24533 optval |= 1;
24534 }
24535 if (rack->rc_rack_use_dsack) {
24536 optval |= 2;
24537 }
24538 break;
24539 case TCP_RACK_ENABLE_HYSTART:
24541 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) {
24542 optval = RACK_HYSTART_ON;
24543 if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND)
24544 optval = RACK_HYSTART_ON_W_SC;
24545 if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH)
24546 optval = RACK_HYSTART_ON_W_SC_C;
24547 } else {
24548 optval = RACK_HYSTART_OFF;
24549 }
24551 break;
24552 case TCP_RACK_DGP_IN_REC:
24553 error = EINVAL;
24554 break;
24555 case TCP_RACK_HI_BETA:
24556 optval = rack->rack_hibeta;
24557 break;
24558 case TCP_DEFER_OPTIONS:
24559 optval = rack->defer_options;
24560 break;
24561 case TCP_RACK_MEASURE_CNT:
24562 optval = rack->r_ctl.req_measurements;
24563 break;
24564 case TCP_REC_ABC_VAL:
24565 optval = rack->r_use_labc_for_rec;
24566 break;
24567 case TCP_RACK_ABC_VAL:
24568 optval = rack->rc_labc;
24569 break;
24570 case TCP_HDWR_UP_ONLY:
24571 optval = rack->r_up_only;
24572 break;
24573 case TCP_FILLCW_RATE_CAP:
24574 loptval = rack->r_ctl.fillcw_cap;
24575 break;
24576 case TCP_PACING_RATE_CAP:
24577 loptval = rack->r_ctl.bw_rate_cap;
24578 break;
24579 case TCP_RACK_PROFILE:
24580 /* You cannot retrieve a profile, it's write only */
24581 error = EINVAL;
24582 break;
24583 case TCP_SIDECHAN_DIS:
24584 optval = rack->r_ctl.side_chan_dis_mask;
24585 break;
24586 case TCP_HYBRID_PACING:
24587 /* You cannot retrieve hybrid pacing information, it's write only */
24588 error = EINVAL;
24589 break;
24590 case TCP_USE_CMP_ACKS:
24591 optval = rack->r_use_cmp_ack;
24592 break;
24593 case TCP_RACK_PACE_TO_FILL:
24594 optval = rack->rc_pace_to_cwnd;
24595 break;
24596 case TCP_RACK_NO_PUSH_AT_MAX:
24597 optval = rack->r_ctl.rc_no_push_at_mrtt;
24598 break;
24599 case TCP_SHARED_CWND_ENABLE:
24600 optval = rack->rack_enable_scwnd;
24601 break;
24602 case TCP_RACK_NONRXT_CFG_RATE:
24603 optval = rack->rack_rec_nonrxt_use_cr;
24604 break;
24605 case TCP_NO_PRR:
24606 if (rack->rack_no_prr == 1)
24607 optval = 1;
24608 else if (rack->no_prr_addback == 1)
24609 optval = 2;
24610 else
24611 optval = 0;
24612 break;
24613 case TCP_GP_USE_LTBW:
24614 if (rack->dis_lt_bw) {
24615 /* It is not used */
24616 optval = 0;
24617 } else if (rack->use_lesser_lt_bw) {
24618 /* we use min() */
24619 optval = 1;
24620 } else {
24621 /* we use max() */
24622 optval = 2;
24623 }
24624 break;
24625 case TCP_RACK_DO_DETECTION:
24626 error = EINVAL;
24627 break;
24628 case TCP_RACK_MBUF_QUEUE:
24629 /* Now do we use the LRO mbuf-queue feature */
24630 optval = rack->r_mbuf_queue;
24631 break;
24632 case RACK_CSPR_IS_FCC:
24633 optval = rack->cspr_is_fcc;
24634 break;
24635 case TCP_TIMELY_DYN_ADJ:
24636 optval = rack->rc_gp_dyn_mul;
24637 break;
24638 case TCP_BBR_IWINTSO:
24639 error = EINVAL;
24640 break;
24641 case TCP_RACK_TLP_REDUCE:
24642 /* RACK TLP cwnd reduction (bool) */
24643 optval = rack->r_ctl.rc_tlp_cwnd_reduce;
24644 break;
24645 case TCP_BBR_RACK_INIT_RATE:
24646 val = rack->r_ctl.init_rate;
24647 /* convert to kbits per sec */
24648 val *= 8;
24649 val /= 1000;
24650 optval = (uint32_t)val;
24651 break;
24652 case TCP_RACK_FORCE_MSEG:
24653 optval = rack->rc_force_max_seg;
24654 break;
24655 case TCP_RACK_PACE_MIN_SEG:
24656 optval = rack->r_ctl.rc_user_set_min_segs;
24657 break;
24658 case TCP_RACK_PACE_MAX_SEG:
24659 /* Max segments in a pace */
24660 optval = rack->rc_user_set_max_segs;
24661 break;
24662 case TCP_RACK_PACE_ALWAYS:
24663 /* Use the always pace method */
24664 optval = rack->rc_always_pace;
24665 break;
24666 case TCP_RACK_PRR_SENDALOT:
24667 /* Allow PRR to send more than one seg */
24668 optval = rack->r_ctl.rc_prr_sendalot;
24669 break;
24670 case TCP_RACK_MIN_TO:
24671 /* Minimum time between rack t-o's in ms */
24672 optval = rack->r_ctl.rc_min_to;
24673 break;
24674 case TCP_RACK_SPLIT_LIMIT:
24675 optval = rack->r_ctl.rc_split_limit;
24676 break;
24677 case TCP_RACK_EARLY_SEG:
24678 /* If early recovery max segments */
24679 optval = rack->r_ctl.rc_early_recovery_segs;
24680 break;
24681 case TCP_RACK_REORD_THRESH:
24682 /* RACK reorder threshold (shift amount) */
24683 optval = rack->r_ctl.rc_reorder_shift;
24684 break;
24685 case TCP_SS_EEXIT:
24686 if (rack->r_ctl.gp_rnd_thresh) {
24687 uint32_t v;
24689 v = rack->r_ctl.gp_gain_req;
24690 v <<= 17;
24691 optval = v | (rack->r_ctl.gp_rnd_thresh & 0xff);
24692 if (rack->r_ctl.gate_to_fs == 1)
24693 optval |= 0x10000;
24694 } else
24695 optval = 0;
24696 break;
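/*
 * Illustrative decode (editorial note; field meanings follow the field
 * names used above): the value reported for TCP_SS_EEXIT is packed as
 *
 *	bits  0..7   gp_rnd_thresh
 *	bit   16     set when gate_to_fs == 1
 *	bits 17..31  gp_gain_req
 *
 * so, for example, gp_gain_req = 120 with gate_to_fs set and a round
 * threshold of 5 reads back as (120 << 17) | 0x10000 | 5.
 */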
24697 case TCP_RACK_REORD_FADE:
24698 /* Does reordering fade after ms time */
24699 optval = rack->r_ctl.rc_reorder_fade;
24700 break;
24701 case TCP_BBR_USE_RACK_RR:
24702 /* Do we use the rack cheat for rxt */
24703 optval = rack->use_rack_rr;
24704 break;
24705 case TCP_RACK_RR_CONF:
24706 optval = rack->r_rr_config;
24707 break;
24708 case TCP_HDWR_RATE_CAP:
24709 optval = rack->r_rack_hw_rate_caps;
24710 break;
24711 case TCP_BBR_HDWR_PACE:
24712 optval = rack->rack_hdw_pace_ena;
24713 break;
24714 case TCP_RACK_TLP_THRESH:
24715 /* RACK TLP threshold i.e. srtt+(srtt/N) */
24716 optval = rack->r_ctl.rc_tlp_threshold;
24717 break;
24718 case TCP_RACK_PKT_DELAY:
24719 /* RACK added ms i.e. rack-rtt + reord + N */
24720 optval = rack->r_ctl.rc_pkt_delay;
24721 break;
24722 case TCP_RACK_TLP_USE:
24723 optval = rack->rack_tlp_threshold_use;
24724 break;
24725 case TCP_PACING_DND:
24726 optval = rack->rc_pace_dnd;
24727 break;
24728 case TCP_RACK_PACE_RATE_CA:
24729 optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
24730 break;
24731 case TCP_RACK_PACE_RATE_SS:
24732 optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
24733 break;
24734 case TCP_RACK_PACE_RATE_REC:
24735 optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
24736 break;
24737 case TCP_DGP_UPPER_BOUNDS:
24738 optval = rack->r_ctl.rack_per_upper_bound_ss;
24739 optval <<= 16;
24740 optval |= rack->r_ctl.rack_per_upper_bound_ca;
24741 break;
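/*
 * Illustrative decode (editorial note): TCP_DGP_UPPER_BOUNDS packs the
 * slow-start bound into the upper 16 bits and the congestion-avoidance
 * bound into the lower 16 bits, i.e. (ss << 16) | ca, so a reader can
 * recover them with (optval >> 16) and (optval & 0xffff).
 */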
24742 case TCP_RACK_GP_INCREASE_SS:
24743 optval = rack->r_ctl.rack_per_of_gp_ss;
24744 break;
24745 case TCP_RACK_GP_INCREASE_CA:
24746 optval = rack->r_ctl.rack_per_of_gp_ca;
24747 break;
24748 case TCP_RACK_PACING_DIVISOR:
24749 optval = rack->r_ctl.pace_len_divisor;
24750 break;
24751 case TCP_BBR_RACK_RTT_USE:
24752 optval = rack->r_ctl.rc_rate_sample_method;
24753 break;
24754 case TCP_DELACK:
24755 optval = tp->t_delayed_ack;
24756 break;
24757 case TCP_DATA_AFTER_CLOSE:
24758 optval = rack->rc_allow_data_af_clo;
24759 break;
24760 case TCP_SHARED_CWND_TIME_LIMIT:
24761 optval = rack->r_limit_scw;
24762 break;
24763 case TCP_HONOR_HPTS_MIN:
24764 if (rack->r_use_hpts_min)
24765 optval = rack->r_ctl.max_reduction;
24766 else
24767 optval = 0;
24768 break;
24769 case TCP_REC_IS_DYN:
24770 optval = rack->rc_gp_no_rec_chg;
24771 break;
24772 case TCP_NO_TIMELY:
24773 optval = rack->rc_skip_timely;
24774 break;
24775 case TCP_RACK_TIMER_SLOP:
24776 optval = rack->r_ctl.timer_slop;
24777 break;
24778 default:
24779 return (tcp_default_ctloutput(tp, sopt));
24780 break;
24781 }
24782 INP_WUNLOCK(inp);
24783 if (error == 0) {
24784 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) ||
24785 (sopt->sopt_name == TCP_FILLCW_RATE_CAP))
24786 error = sooptcopyout(sopt, &loptval, sizeof loptval);
24787 else
24788 error = sooptcopyout(sopt, &optval, sizeof optval);
24789 }
24790 return (error);
24791 }
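/*
 * Illustrative (editorial note, not part of the stack): reading one of the
 * boolean/int options above from user space.  A minimal sketch, assuming
 * the option name is exposed by <netinet/tcp.h> on the running system and
 * "fd" is a connected TCP socket using the rack stack:
 *
 *	int slop;
 *	socklen_t len = sizeof(slop);
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_RACK_TIMER_SLOP, &slop, &len) == 0)
 *		printf("timer slop: %d\n", slop);
 */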
24793 static int
24794 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt)
24795 {
24796 if (sopt->sopt_dir == SOPT_SET) {
24797 return (rack_set_sockopt(tp, sopt));
24798 } else if (sopt->sopt_dir == SOPT_GET) {
24799 return (rack_get_sockopt(tp, sopt));
24800 } else {
24801 panic("%s: sopt_dir %d", __func__, sopt->sopt_dir);
24802 }
24803 }
24805 static const char *rack_stack_names[] = {
24806 __XSTRING(STACKNAME),
24807 #ifdef STACKALIAS
24808 __XSTRING(STACKALIAS),
24809 #endif
24810 };
24812 static int
24813 rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
24814 {
24815 memset(mem, 0, size);
24816 return (0);
24817 }
24819 static void
24820 rack_dtor(void *mem, int32_t size, void *arg)
24821 {
24822 }
24825 static bool rack_mod_inited = false;
24827 static int
24828 tcp_addrack(module_t mod, int32_t type, void *data)
24829 {
24830 int32_t err = 0;
24831 int num_stacks;
24833 switch (type) {
24834 case MOD_LOAD:
24835 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
24836 sizeof(struct rack_sendmap),
24837 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
24839 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
24840 sizeof(struct tcp_rack),
24841 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
24843 sysctl_ctx_init(&rack_sysctl_ctx);
24844 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
24845 SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
24846 OID_AUTO,
24847 #ifdef STACKALIAS
24848 __XSTRING(STACKALIAS),
24849 #else
24850 __XSTRING(STACKNAME),
24851 #endif
24852 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
24853 "");
24854 if (rack_sysctl_root == NULL) {
24855 printf("Failed to add sysctl node\n");
24856 err = EFAULT;
24857 goto free_uma;
24858 }
24859 rack_init_sysctls();
24860 num_stacks = nitems(rack_stack_names);
24861 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
24862 rack_stack_names, &num_stacks);
24863 if (err) {
24864 printf("Failed to register %s stack name for "
24865 "%s module\n", rack_stack_names[num_stacks],
24866 __XSTRING(MODNAME));
24867 sysctl_ctx_free(&rack_sysctl_ctx);
24868 free_uma:
24869 uma_zdestroy(rack_zone);
24870 uma_zdestroy(rack_pcb_zone);
24871 rack_counter_destroy();
24872 printf("Failed to register rack module -- err:%d\n", err);
24873 return (err);
24874 }
24875 tcp_lro_reg_mbufq();
24876 rack_mod_inited = true;
24877 break;
24878 case MOD_QUIESCE:
24879 err = deregister_tcp_functions(&__tcp_rack, true, false);
24880 break;
24881 case MOD_UNLOAD:
24882 err = deregister_tcp_functions(&__tcp_rack, false, true);
24883 if (err == EBUSY)
24884 break;
24885 if (rack_mod_inited) {
24886 uma_zdestroy(rack_zone);
24887 uma_zdestroy(rack_pcb_zone);
24888 sysctl_ctx_free(&rack_sysctl_ctx);
24889 rack_counter_destroy();
24890 rack_mod_inited = false;
24891 }
24892 tcp_lro_dereg_mbufq();
24893 err = 0;
24894 break;
24895 default:
24896 return (EOPNOTSUPP);
24897 }
24898 return (err);
24899 }
24901 static moduledata_t tcp_rack = {
24902 .name = __XSTRING(MODNAME),
24903 .evhand = tcp_addrack,
24904 .priv = 0
24905 };
24907 MODULE_VERSION(MODNAME, 1);
24908 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
24909 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
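/*
 * Illustrative usage (editorial note): once built, this module is loaded
 * and selected like any other TCP function block, for example:
 *
 *	kldload tcp_rack
 *	sysctl net.inet.tcp.functions_available
 *	sysctl net.inet.tcp.functions_default=rack
 *
 * Per-socket selection is also possible via the TCP_FUNCTION_BLK socket
 * option; the exact knobs available depend on the running FreeBSD version.
 */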
24911 #endif /* #if !defined(INET) && !defined(INET6) */