/* Copyright (c) 2017 The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file circuitpadding.c
 * \brief Circuit-level padding implementation
 *
 * This file implements Tor proposal 254 "Padding Negotiation", which is
 * heavily inspired by the paper "Toward an Efficient Website Fingerprinting
 * Defense" by M. Juarez, M. Imani, M. Perry, C. Diaz, and M. Wright.
 *
 * In particular, the code in this file describes mechanisms for clients to
 * negotiate various types of circuit-level padding from relays.
 *
 * Each padding type is described by a state machine (circpad_machine_spec_t),
 * which is also referred to as a "padding machine" in this file. Currently,
 * these state machines are hardcoded in the source code (e.g. see
 * circpad_machines_init()), but in the future we will be able to
 * serialize them in the torrc or the consensus.
 *
 * As specified by prop#254, clients can negotiate padding with relays by using
 * PADDING_NEGOTIATE cells. After successful padding negotiation, padding
 * machines are assigned to the circuit in their mutable form as a
 * circpad_machine_runtime_t.
 *
 * Each state of a padding state machine can be either:
 * - A histogram that specifies inter-arrival padding delays.
 * - Or a parametrized probability distribution that specifies inter-arrival
 *   delays (see circpad_distribution_type_t).
 *
 * Padding machines start from the START state and finish with the END
 * state. They can transition between states using the events in
 * circpad_event_t.
 *
 * When a padding machine reaches the END state, it gets wiped from the circuit
 * so that other padding machines can take over if needed (see
 * circpad_machine_spec_transitioned_to_end()).
 *
 ****************************
 * General notes:
 *
 * All used machines should be heap allocated and placed into
 * origin_padding_machines/relay_padding_machines so that they get correctly
 * cleaned up by the circpad_free_all() function.
 **/

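/*
 * Illustrative sketch only (not compiled here; the machines that actually
 * ship are built in circpad_machines_init() and circuitpadding_machines.c):
 * a minimal client-side machine might be assembled roughly like this,
 * assuming the usual spec fields and helpers declared in circuitpadding.h.
 * The concrete values below are made up for illustration.
 *
 *   circpad_machine_spec_t *m = tor_malloc_zero(sizeof(*m));
 *   m->name = "example_machine";
 *   m->is_origin_side = 1;        // runs on the client
 *   m->target_hopnum = 2;         // negotiate padding with the middle hop
 *   circpad_machine_states_init(m, 2);
 *   // State 0 would transition to state 1 on a cell event; state 1 would
 *   // schedule padding from a histogram or an iat_dist.
 *   smartlist_add(origin_padding_machines, m);
 */
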
#define CIRCUITPADDING_PRIVATE

#include "lib/math/fp.h"
#include "lib/math/prob_distr.h"
#include "core/or/or.h"
#include "core/or/circuitpadding.h"
#include "core/or/circuitpadding_machines.h"
#include "core/or/circuitlist.h"
#include "core/or/circuituse.h"
#include "core/mainloop/netstatus.h"
#include "core/or/relay.h"
#include "feature/stats/rephist.h"
#include "feature/nodelist/networkstatus.h"

#include "core/or/channel.h"

#include "lib/time/compat_time.h"
#include "lib/defs/time.h"
#include "lib/crypt_ops/crypto_rand.h"

#include "core/or/crypt_path_st.h"
#include "core/or/circuit_st.h"
#include "core/or/origin_circuit_st.h"
#include "core/or/or_circuit_st.h"
#include "feature/nodelist/routerstatus_st.h"
#include "feature/nodelist/node_st.h"
#include "core/or/cell_st.h"
#include "core/or/extend_info_st.h"
#include "core/crypto/relay_crypto.h"
#include "feature/nodelist/nodelist.h"

#include "src/core/or/conflux_util.h"

#include "app/config/config.h"

static inline circpad_circuit_state_t circpad_circuit_state(
                                        origin_circuit_t *circ);
static void circpad_setup_machine_on_circ(circuit_t *on_circ,
                                        const circpad_machine_spec_t *machine);
static double circpad_distribution_sample(circpad_distribution_t dist);

static inline void circpad_machine_update_state_length_for_nonpadding(
        circpad_machine_runtime_t *mi);

/** Cached consensus params */
static uint8_t circpad_padding_disabled;
static uint8_t circpad_padding_reduced;
static uint8_t circpad_global_max_padding_percent;
static uint16_t circpad_global_allowed_cells;
static uint16_t circpad_max_circ_queued_cells;

/** Global cell counts, for rate limiting */
static uint64_t circpad_global_padding_sent;
static uint64_t circpad_global_nonpadding_sent;

/** This is the list of circpad_machine_spec_t's parsed from consensus and
 *  torrc that have origin_side == 1 (ie: are for client side).
 *
 *  The machines in this smartlist are considered immutable and they are used
 *  as-is by circuits, so they should not change or get deallocated in Tor's
 *  runtime for as long as circuits are alive. */
STATIC smartlist_t *origin_padding_machines = NULL;

/** This is the list of circpad_machine_spec_t's parsed from consensus and
 *  torrc that have origin_side == 0 (ie: are for relay side).
 *
 *  The machines in this smartlist are considered immutable and they are used
 *  as-is by circuits, so they should not change or get deallocated in Tor's
 *  runtime for as long as circuits are alive. */
STATIC smartlist_t *relay_padding_machines = NULL;

#ifndef COCCI
/** Loop over the current padding state machines using <b>loop_var</b> as the
 *  loop variable. */
#define FOR_EACH_CIRCUIT_MACHINE_BEGIN(loop_var) \
  STMT_BEGIN \
  for (int loop_var = 0; loop_var < CIRCPAD_MAX_MACHINES; loop_var++) {
#define FOR_EACH_CIRCUIT_MACHINE_END } STMT_END ;

/** Loop over the current active padding state machines using <b>loop_var</b>
 *  as the loop variable. If a machine is not active, skip it. */
#define FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(loop_var, circ) \
  FOR_EACH_CIRCUIT_MACHINE_BEGIN(loop_var) \
  if (!(circ)->padding_info[loop_var]) \
    continue;
#define FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END } STMT_END ;
#endif /* !defined(COCCI) */

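/*
 * Usage sketch for the macros above (this mirrors how they are used
 * throughout this file): iterate over the machines that are active on a
 * circuit and act on each mutable runtime struct.
 *
 *   FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, circ) {
 *     circpad_machine_runtime_t *mi = circ->padding_info[i];
 *     // ... inspect or update mi ...
 *   } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
 */
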
/**
 * Free the machineinfo at an index
 */
static void
circpad_circuit_machineinfo_free_idx(circuit_t *circ, int idx)
{
  if (circ->padding_info[idx]) {
    log_fn(LOG_INFO,LD_CIRC, "Freeing padding info idx %d on circuit %u (%d)",
           idx, CIRCUIT_IS_ORIGIN(circ) ?
             TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0,
           circ->purpose);

    tor_free(circ->padding_info[idx]->histogram);
    timer_free(circ->padding_info[idx]->padding_timer);
    tor_free(circ->padding_info[idx]);
  }
}

/**
 * Return true if circpad has decided to hold the circuit open for additional
 * padding. This function is used to take and retain ownership of certain
 * types of circuits that have padding machines on them, that have been passed
 * to circuit_mark_for_close().
 *
 * circuit_mark_for_close() calls this function to ask circpad if any padding
 * machines want to keep the circuit open longer to pad.
 *
 * Any non-measurement circuit that was closed for a normal, non-error reason
 * code may be held open for up to CIRCPAD_DELAY_INFINITE microseconds between
 * network-driven cell events.
 *
 * After CIRCPAD_DELAY_INFINITE microseconds of silence on a circuit, this
 * function will no longer hold it open (it will return 0 regardless of
 * what the machines ask for, and thus circuit_expire_old_circuits_clientside()
 * will close the circuit after roughly 1.25hr of idle time, maximum,
 * regardless of the padding machine state).
 */
int
circpad_marked_circuit_for_padding(circuit_t *circ, int reason)
{
  /* If the circuit purpose is measurement or path bias, don't
   * hold the circuit open for padding. */
  if (circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING ||
      circ->purpose == CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT) {
    return 0;
  }

  /* If the circuit is closed for any reason other than these three valid,
   * client-side close reasons, do not try to keep it open. It is probably
   * damaged or unusable. Note this is OK with vanguards because
   * controller-closed circuits have REASON=REQUESTED, so vanguards-closed
   * circuits will not be held open (we want them to close ASAP). */
  if (!(reason == END_CIRC_REASON_NONE ||
        reason == END_CIRC_REASON_FINISHED ||
        reason == END_CIRC_REASON_IP_NOW_REDUNDANT)) {
    return 0;
  }

  FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, circ) {
    circpad_machine_runtime_t *mi = circ->padding_info[i];
    if (!mi) {
      continue; // No padding runtime info; check next machine
    }

    const circpad_state_t *state = circpad_machine_current_state(mi);

    /* If we're in END state (NULL here), then check next machine */
    if (!state) {
      continue; // check next machine
    }

    /* If the machine does not want to control the circuit close itself, then
     * check the next machine */
    if (!circ->padding_machine[i]->manage_circ_lifetime) {
      continue; // check next machine
    }

    /* If the machine has reached the END state, we can close. Check next
     * machine. */
    if (mi->current_state == CIRCPAD_STATE_END) {
      continue; // check next machine
    }

    log_info(LD_CIRC, "Circuit %d is not marked for close because of a "
             "pending padding machine in index %d.",
             CIRCUIT_IS_ORIGIN(circ) ?
             TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0, i);

    /* If the machine has had no network events at all within the
     * last circpad_delay_t timespan, it's in some deadlock state.
     * Tell circuit_mark_for_close() that we don't own it anymore.
     * This will allow circuit_expire_old_circuits_clientside() to
     * expire it cleanly after that point. */
    if (circ->padding_info[i]->last_cell_time_sec +
        (time_t)CIRCPAD_DELAY_MAX_SECS < approx_time()) {
      log_notice(LD_BUG, "Circuit %d was not marked for close because of a "
               "pending padding machine in index %d for over an hour. "
               "Circuit is a %s",
               CIRCUIT_IS_ORIGIN(circ) ?
               TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0,
               i, circuit_purpose_to_string(circ->purpose));

      return 0; // abort timer reached; mark the circuit for close now
    }

    /* If we weren't marked dirty yet, let's pretend we're dirty now.
     * ("Dirty" means that a circuit has been used for application traffic
     * by Tor. Dirty circuits have different expiry times, and are not
     * considered in counts of built circuits, etc.) By claiming that we're
     * dirty, the rest of Tor will make decisions as if we were actually
     * used by application data.
     *
     * This is most important for circuit_expire_old_circuits_clientside(),
     * where we want that function to expire us after the padding machine
     * has shut down, but using the MaxCircuitDirtiness timer instead of
     * the idle circuit timer (again, we want this because we're not
     * supposed to look idle to Guard nodes that can see our lifespan). */
    if (!circ->timestamp_dirty) {
      circ->timestamp_dirty = approx_time();
      if (circ->conflux && CIRCUIT_IS_ORIGIN(circ))
        conflux_sync_circ_fields(circ->conflux, TO_ORIGIN_CIRCUIT(circ));
    }

    /* Take ownership of the circuit */
    circuit_change_purpose(circ, CIRCUIT_PURPOSE_C_CIRCUIT_PADDING);
    return 1;
  } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;

  return 0; // No machine wanted to keep the circuit open; mark for close
}

/**
 * Free all the machineinfos in <b>circ</b> that match <b>machine_num</b>.
 *
 * If machine_ctr is non-zero, also make sure it matches the padding_info's
 * machine counter before freeing.
 *
 * Returns true if any machineinfos with that number were freed.
 * False otherwise. */
static int
free_circ_machineinfos_with_machine_num(circuit_t *circ, int machine_num,
                                        uint32_t machine_ctr)
{
  int found = 0;

  FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) {
    if (circ->padding_machine[i] &&
        circ->padding_machine[i]->machine_num == machine_num) {
      /* If machine_ctr is non-zero, make sure it matches too. This
       * is to ensure that old STOP messages don't shut down newer machines. */
      if (machine_ctr && circ->padding_info[i] &&
          circ->padding_info[i]->machine_ctr != machine_ctr) {
        log_fn(LOG_INFO, LD_CIRC,
               "Padding shutdown for wrong (old?) machine ctr: %u vs %u",
               machine_ctr, circ->padding_info[i]->machine_ctr);
      } else {
        circpad_circuit_machineinfo_free_idx(circ, i);
        circ->padding_machine[i] = NULL;
        found = 1;
      }
    }
  } FOR_EACH_CIRCUIT_MACHINE_END;

  return found;
}

/**
 * Free all padding machines and mutable info associated with circuit
 */
void
circpad_circuit_free_all_machineinfos(circuit_t *circ)
{
  FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) {
    circpad_circuit_machineinfo_free_idx(circ, i);
  } FOR_EACH_CIRCUIT_MACHINE_END;
}

/**
 * Allocate a new mutable machineinfo structure.
 */
STATIC circpad_machine_runtime_t *
circpad_circuit_machineinfo_new(circuit_t *on_circ, int machine_index)
{
  circpad_machine_runtime_t *mi =
    tor_malloc_zero(sizeof(circpad_machine_runtime_t));
  mi->machine_index = machine_index;
  mi->on_circ = on_circ;
  mi->last_cell_time_sec = approx_time();
  mi->machine_ctr = on_circ->padding_machine_ctr;

  return mi;
}

/**
 * Return the circpad_state_t for the current state based on the
 * mutable runtime info in <b>mi</b>.
 *
 * This function returns NULL when the machine is in the end state or in an
 * invalid state.
 */
STATIC const circpad_state_t *
circpad_machine_current_state(const circpad_machine_runtime_t *mi)
{
  const circpad_machine_spec_t *machine = CIRCPAD_GET_MACHINE(mi);

  if (mi->current_state == CIRCPAD_STATE_END) {
    return NULL;
  } else if (BUG(mi->current_state >= machine->num_states)) {
    log_fn(LOG_WARN,LD_CIRC,
           "Invalid circuit padding state %d",
           mi->current_state);
    return NULL;
  }

  return &machine->states[mi->current_state];
}

/**
 * Get the lower bound of a histogram bin.
 *
 * You can obtain the upper bound using histogram_get_bin_upper_bound().
 *
 * This function can also be called with 'bin' set to a value equal to or
 * greater than histogram_len, in which case the infinity bin is chosen and
 * CIRCPAD_DELAY_INFINITE is returned.
 */
STATIC circpad_delay_t
circpad_histogram_bin_to_usec(const circpad_machine_runtime_t *mi,
                              circpad_hist_index_t bin)
{
  const circpad_state_t *state = circpad_machine_current_state(mi);
  circpad_delay_t rtt_add_usec = 0;

  /* Our state should have been checked to be non-null by the caller
   * (circpad_machine_remove_token()) */
  if (BUG(state == NULL)) {
    return CIRCPAD_DELAY_INFINITE;
  }

  /* The infinity bin has an upper bound of infinity, so make sure we return
   * that if they ask for it. */
  if (bin > CIRCPAD_INFINITY_BIN(state)) {
    return CIRCPAD_DELAY_INFINITE;
  }

  /* If we are using an RTT estimate, consider it as well. */
  if (state->use_rtt_estimate) {
    rtt_add_usec = mi->rtt_estimate_usec;
  }

  return state->histogram_edges[bin] + rtt_add_usec;
}

/**
 * Like circpad_histogram_bin_to_usec(), but return the upper bound of bin.
 * (The upper bound is included in the bin.)
 */
STATIC circpad_delay_t
histogram_get_bin_upper_bound(const circpad_machine_runtime_t *mi,
                              circpad_hist_index_t bin)
{
  return circpad_histogram_bin_to_usec(mi, bin+1) - 1;
}

/** Return the midpoint of the histogram bin <b>bin_index</b>. */
static circpad_delay_t
circpad_get_histogram_bin_midpoint(const circpad_machine_runtime_t *mi,
                                   int bin_index)
{
  circpad_delay_t left_bound = circpad_histogram_bin_to_usec(mi, bin_index);
  circpad_delay_t right_bound = histogram_get_bin_upper_bound(mi, bin_index);

  return left_bound + (right_bound - left_bound)/2;
}

/**
 * Return the bin that contains the usec argument.
 * "Contains" is defined as the usec value being in [lower, upper).
 *
 * This function will never return the infinity bin (histogram_len-1), in order
 * to simplify the rest of the code, so if a usec is provided that falls above
 * the highest non-infinity bin, that bin index will be returned.
 */
STATIC circpad_hist_index_t
circpad_histogram_usec_to_bin(const circpad_machine_runtime_t *mi,
                              circpad_delay_t usec)
{
  const circpad_state_t *state = circpad_machine_current_state(mi);
  circpad_delay_t rtt_add_usec = 0;
  circpad_hist_index_t bin;

  /* Our state should have been checked to be non-null by the caller
   * (circpad_machine_remove_token()) */
  if (BUG(state == NULL)) {
    return 0;
  }

  /* If we are using an RTT estimate, consider it as well. */
  if (state->use_rtt_estimate) {
    rtt_add_usec = mi->rtt_estimate_usec;
  }

  /* Walk through the bins and check the upper bound of each bin; if 'usec' is
   * less-or-equal to that, return that bin. If rtt_estimate is enabled, then
   * add that to the upper bound of each bin.
   *
   * We don't want to return the infinity bin here, so don't go there. */
  for (bin = 0 ; bin < CIRCPAD_INFINITY_BIN(state) ; bin++) {
    if (usec <= histogram_get_bin_upper_bound(mi, bin) + rtt_add_usec) {
      return bin;
    }
  }

  /* We don't want to return the infinity bin here, so if we still didn't find
   * the right bin, return the highest non-infinity bin. */
  return CIRCPAD_INFINITY_BIN(state)-1;
}

/**
 * Return true if the machine supports token removal.
 *
 * Token removal is equivalent to having a mutable histogram in the
 * circpad_machine_runtime_t mutable info. So while we're at it,
 * let's assert that everything is consistent between the mutable
 * runtime and the readonly machine spec.
 */
static int
circpad_is_token_removal_supported(circpad_machine_runtime_t *mi)
{
  /* No runtime histogram == no token removal */
  if (mi->histogram == NULL) {
    /* Machines that don't want token removal are trying to avoid
     * potentially expensive mallocs, extra memory accesses, and/or
     * potentially expensive monotime calls. Let's minimize checks
     * and keep this path fast. */
    tor_assert_nonfatal(mi->histogram_len == 0);
    return 0;
  } else {
    /* Machines that do want token removal are less sensitive to performance.
     * Let's spend some time to check that our state is consistent and sane */
    const circpad_state_t *state = circpad_machine_current_state(mi);
    if (BUG(!state)) {
      return 1;
    }
    tor_assert_nonfatal(state->token_removal != CIRCPAD_TOKEN_REMOVAL_NONE);
    tor_assert_nonfatal(state->histogram_len == mi->histogram_len);
    tor_assert_nonfatal(mi->histogram_len != 0);
    return 1;
  }

  tor_assert_nonfatal_unreached();
  return 0;
}

/**
 * This function frees any token bins allocated from a previous state.
 *
 * Called after a state transition, or if the bins are empty.
 */
STATIC void
circpad_machine_setup_tokens(circpad_machine_runtime_t *mi)
{
  const circpad_state_t *state = circpad_machine_current_state(mi);

  /* If this state doesn't exist, or doesn't have token removal,
   * free any previous state's runtime histogram, and bail.
   *
   * If we don't have a token removal strategy, we also don't need a runtime
   * histogram and we rely on the immutable one in machine_spec_t. */
  if (!state || state->token_removal == CIRCPAD_TOKEN_REMOVAL_NONE) {
    if (mi->histogram) {
      tor_free(mi->histogram);
      mi->histogram = NULL;
      mi->histogram_len = 0;
    }
    return;
  }

  /* Try to avoid re-mallocing if we don't really need to */
  if (!mi->histogram || (mi->histogram
          && mi->histogram_len != state->histogram_len)) {
    tor_free(mi->histogram); // null ok
    mi->histogram = tor_malloc_zero(sizeof(circpad_hist_token_t)
                                    *state->histogram_len);
  }
  mi->histogram_len = state->histogram_len;

  memcpy(mi->histogram, state->histogram,
         sizeof(circpad_hist_token_t)*state->histogram_len);
}

/**
 * Choose a length for this state (in cells), if specified.
 */
static void
circpad_choose_state_length(circpad_machine_runtime_t *mi)
{
  const circpad_state_t *state = circpad_machine_current_state(mi);
  double length;

  if (!state || state->length_dist.type == CIRCPAD_DIST_NONE) {
    mi->state_length = CIRCPAD_STATE_LENGTH_INFINITE;
    return;
  }

  length = circpad_distribution_sample(state->length_dist);
  length = MAX(0, length);
  length += state->start_length;

  if (state->max_length) {
    length = MIN(length, state->max_length);
  }

  mi->state_length = clamp_double_to_int64(length);

  log_info(LD_CIRC, "State length sampled to %"PRIu64" for circuit %u",
      mi->state_length, CIRCUIT_IS_ORIGIN(mi->on_circ) ?
      TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);
}

/**
 * Sample a value from our iat_dist, and clamp it safely
 * to circpad_delay_t.
 *
 * Before returning, add <b>delay_shift</b> (can be zero) to the sampled value.
 */
static circpad_delay_t
circpad_distribution_sample_iat_delay(const circpad_state_t *state,
                                      circpad_delay_t delay_shift)
{
  double val = circpad_distribution_sample(state->iat_dist);
  /* These comparisons are safe, because the output is in the range
   * [0, 2**32), and double has a precision of 53 bits. */
  /* We want a positive sample value */
  val = MAX(0, val);
  /* Respect the maximum sample setting */
  val = MIN(val, state->dist_max_sample_usec);

  /* Now apply the shift:
   * This addition is exact: val is at most 2**32-1, delay_shift is at most
   * 2**32-1, and doubles have a precision of 53 bits. */
  val += delay_shift;

  /* Clamp the distribution at infinite delay val */
  return (circpad_delay_t)MIN(tor_llround(val), CIRCPAD_DELAY_INFINITE);
}

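/*
 * Worked example (illustrative numbers): if the distribution sample comes
 * out at 1500.7 usec, dist_max_sample_usec is 1000, and delay_shift is 250,
 * the result is MIN(1500.7, 1000) + 250 = 1250, which is far below
 * CIRCPAD_DELAY_INFINITE, so 1250 is returned after rounding.
 */
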
/**
 * Sample an expected time-until-next-packet delay from the histogram or
 * probability distribution.
 *
 * A bin of the histogram is chosen with probability proportional to the number
 * of tokens in each bin, and then a time value is chosen uniformly from that
 * bin's [start,end) time range.
 */
STATIC circpad_delay_t
circpad_machine_sample_delay(circpad_machine_runtime_t *mi)
{
  const circpad_state_t *state = circpad_machine_current_state(mi);
  const circpad_hist_token_t *histogram = NULL;
  circpad_hist_index_t curr_bin = 0;
  circpad_delay_t bin_start, bin_end;
  /* These three must all be larger than circpad_hist_token_t, because
   * we sum several circpad_hist_token_t values across the histogram */
  uint64_t curr_weight = 0;
  uint64_t histogram_total_tokens = 0;
  uint64_t bin_choice;

  tor_assert(state);

  if (state->iat_dist.type != CIRCPAD_DIST_NONE) {
    /* Sample from a fixed IAT distribution and return */
    circpad_delay_t iat_delay_shift = state->use_rtt_estimate ?
        mi->rtt_estimate_usec + state->dist_added_shift_usec :
        state->dist_added_shift_usec;
    return circpad_distribution_sample_iat_delay(state, iat_delay_shift);
  } else if (circpad_is_token_removal_supported(mi)) {
    histogram = mi->histogram;
    for (circpad_hist_index_t b = 0; b < state->histogram_len; b++)
      histogram_total_tokens += histogram[b];
  } else {
    /* We have a histogram, but it's immutable */
    histogram = state->histogram;
    histogram_total_tokens = state->histogram_total_tokens;
  }

  /* If we are out of tokens, don't schedule padding. */
  if (!histogram_total_tokens) {
    return CIRCPAD_DELAY_INFINITE;
  }

  bin_choice = crypto_fast_rng_get_uint64(get_thread_fast_rng(),
                                          histogram_total_tokens);

  /* Skip all the initial zero bins */
  while (!histogram[curr_bin]) {
    curr_bin++;
  }
  curr_weight = histogram[curr_bin];

  // TODO: This is not constant-time. Pretty sure we don't
  // really need it to be, though.
  while (curr_weight < bin_choice) {
    curr_bin++;
    /* It should be impossible to run past the end of the histogram */
    if (BUG(curr_bin >= state->histogram_len)) {
      return CIRCPAD_DELAY_INFINITE;
    }
    curr_weight += histogram[curr_bin];
  }

  /* Do some basic checking of the current bin we are in */
  if (BUG(curr_bin >= state->histogram_len) ||
      BUG(histogram[curr_bin] == 0)) {
    return CIRCPAD_DELAY_INFINITE;
  }

  // Store this index to remove the token upon callback.
  if (state->token_removal != CIRCPAD_TOKEN_REMOVAL_NONE) {
    mi->chosen_bin = curr_bin;
  }

  if (curr_bin >= CIRCPAD_INFINITY_BIN(state)) {
    if (state->token_removal != CIRCPAD_TOKEN_REMOVAL_NONE &&
        mi->histogram[curr_bin] > 0) {
      mi->histogram[curr_bin]--;
    }

    // Infinity: Don't send a padding packet. Wait for a real packet
    // and then see if our bins are empty or what else we should do.
    return CIRCPAD_DELAY_INFINITE;
  }

  tor_assert(curr_bin < CIRCPAD_INFINITY_BIN(state));

  bin_start = circpad_histogram_bin_to_usec(mi, curr_bin);
  /* We don't need to subtract 1 from the upper bound because the random range
   * function below samples from [bin_start, bin_end) */
  bin_end = circpad_histogram_bin_to_usec(mi, curr_bin+1);

  /* Bin edges are monotonically increasing, so this is a bug. Handle it. */
  if (BUG(bin_start >= bin_end)) {
    return bin_end;
  }

  return (circpad_delay_t)crypto_fast_rng_uint64_range(get_thread_fast_rng(),
                                                       bin_start, bin_end);
}

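/*
 * Worked example (illustrative token counts): with a histogram of {0, 2, 5}
 * the total is 7, so bin_choice is drawn from [0, 7). The leading empty bin
 * is skipped, curr_weight starts at 2 on bin 1, and the walk advances to
 * bin 2 only while curr_weight < bin_choice, i.e. for bin_choice in
 * {3, ..., 6}. A delay is then drawn uniformly from [bin_start, bin_end) of
 * the chosen bin, unless that bin is the infinity bin, in which case
 * CIRCPAD_DELAY_INFINITE is returned and no padding is scheduled.
 */
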
/**
 * Sample a value from the specified probability distribution.
 *
 * Uses functions from src/lib/math/prob_distr.c .
 */
static double
circpad_distribution_sample(circpad_distribution_t dist)
{
  log_fn(LOG_DEBUG,LD_CIRC, "Sampling delay with distribution %d",
         dist.type);

  switch (dist.type) {
    case CIRCPAD_DIST_NONE:
      {
        /* We should not get in here like this */
        tor_assert_nonfatal_unreached();
        return 0;
      }
    case CIRCPAD_DIST_UNIFORM:
      {
        // param2 is upper bound, param1 is lower
        const struct uniform_t my_uniform = {
          .base = UNIFORM(my_uniform),
          .a = dist.param1,
          .b = dist.param2,
        };
        return dist_sample(&my_uniform.base);
      }
    case CIRCPAD_DIST_LOGISTIC:
      {
        /* param1 is Mu, param2 is sigma. */
        const struct logistic_t my_logistic = {
          .base = LOGISTIC(my_logistic),
          .mu = dist.param1,
          .sigma = dist.param2,
        };
        return dist_sample(&my_logistic.base);
      }
    case CIRCPAD_DIST_LOG_LOGISTIC:
      {
        /* param1 is Alpha, param2 is 1.0/Beta */
        const struct log_logistic_t my_log_logistic = {
          .base = LOG_LOGISTIC(my_log_logistic),
          .alpha = dist.param1,
          .beta = dist.param2,
        };
        return dist_sample(&my_log_logistic.base);
      }
    case CIRCPAD_DIST_GEOMETRIC:
      {
        /* param1 is 'p' (success probability) */
        const struct geometric_t my_geometric = {
          .base = GEOMETRIC(my_geometric),
          .p = dist.param1,
        };
        return dist_sample(&my_geometric.base);
      }
    case CIRCPAD_DIST_WEIBULL:
      {
        /* param1 is k, param2 is Lambda */
        const struct weibull_t my_weibull = {
          .base = WEIBULL(my_weibull),
          .k = dist.param1,
          .lambda = dist.param2,
        };
        return dist_sample(&my_weibull.base);
      }
    case CIRCPAD_DIST_PARETO:
      {
        /* param1 is sigma, param2 is xi, no more params for mu so we use 0 */
        const struct genpareto_t my_genpareto = {
          .base = GENPARETO(my_genpareto),
          .mu = 0,
          .sigma = dist.param1,
          .xi = dist.param2,
        };
        return dist_sample(&my_genpareto.base);
      }
  }

  tor_assert_nonfatal_unreached();
  return 0;
}

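/*
 * Usage sketch (illustrative parameters): a state that waits a uniform
 * 1000-2000 usec between cells could be configured as
 *
 *   state->iat_dist.type = CIRCPAD_DIST_UNIFORM;
 *   state->iat_dist.param1 = 1000;   // lower bound
 *   state->iat_dist.param2 = 2000;   // upper bound
 *
 * and circpad_distribution_sample() would then draw from that range.
 */
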
/**
 * Find the index of the first bin whose upper bound is
 * greater than the target, and that has tokens remaining.
 *
 * Used for histograms with token removal.
 */
static circpad_hist_index_t
circpad_machine_first_higher_index(const circpad_machine_runtime_t *mi,
                                   circpad_delay_t target_bin_usec)
{
  circpad_hist_index_t bin = circpad_histogram_usec_to_bin(mi,
                                                           target_bin_usec);

  /* Don't remove from the infinity bin */
  for (; bin < CIRCPAD_INFINITY_BIN(mi); bin++) {
    if (mi->histogram[bin] &&
        histogram_get_bin_upper_bound(mi, bin) >= target_bin_usec) {
      return bin;
    }
  }

  return mi->histogram_len;
}

/**
 * Find the index of the first bin whose lower bound is lower than or equal to
 * <b>target_bin_usec</b>, and that still has tokens remaining.
 *
 * Used for histograms with token removal.
 */
static circpad_hist_index_t
circpad_machine_first_lower_index(const circpad_machine_runtime_t *mi,
                                  circpad_delay_t target_bin_usec)
{
  circpad_hist_index_t bin = circpad_histogram_usec_to_bin(mi,
                                                           target_bin_usec);

  for (; bin >= 0; bin--) {
    if (mi->histogram[bin] &&
        circpad_histogram_bin_to_usec(mi, bin) <= target_bin_usec) {
      return bin;
    }
  }

  return -1;
}

/**
 * Remove a token from the first non-empty bin whose upper bound is
 * greater than the target.
 *
 * Used for histograms with token removal.
 */
static void
circpad_machine_remove_higher_token(circpad_machine_runtime_t *mi,
                                    circpad_delay_t target_bin_usec)
{
  /* We need to remove the token from the first bin
   * whose upper bound is greater than the target, and that
   * has tokens remaining. */
  circpad_hist_index_t bin = circpad_machine_first_higher_index(mi,
                                                       target_bin_usec);

  if (bin >= 0 && bin < CIRCPAD_INFINITY_BIN(mi)) {
    if (!BUG(mi->histogram[bin] == 0)) {
      mi->histogram[bin]--;
    }
  }
}

/**
 * Remove a token from the first non-empty bin whose upper bound is
 * lower than the target.
 *
 * Used for histograms with token removal.
 */
static void
circpad_machine_remove_lower_token(circpad_machine_runtime_t *mi,
                                   circpad_delay_t target_bin_usec)
{
  circpad_hist_index_t bin = circpad_machine_first_lower_index(mi,
                                                       target_bin_usec);

  if (bin >= 0 && bin < CIRCPAD_INFINITY_BIN(mi)) {
    if (!BUG(mi->histogram[bin] == 0)) {
      mi->histogram[bin]--;
    }
  }
}

/* Helper macro: Ensure that the bin has tokens available, and BUG out of the
 * function if it's not the case. */
#define ENSURE_BIN_CAPACITY(bin_index) \
  if (BUG(mi->histogram[bin_index] == 0)) { \
    return; \
  }

/**
 * Remove a token from the closest non-empty bin to the target.
 *
 * If use_usec is true, measure "closest" in terms of the next closest bin
 * midpoint.
 *
 * If it is false, use bin index distance only.
 *
 * Used for histograms with token removal.
 */
STATIC void
circpad_machine_remove_closest_token(circpad_machine_runtime_t *mi,
                                     circpad_delay_t target_bin_usec,
                                     bool use_usec)
{
  circpad_hist_index_t lower, higher, current;
  circpad_hist_index_t bin_to_remove = -1;

  lower = circpad_machine_first_lower_index(mi, target_bin_usec);
  higher = circpad_machine_first_higher_index(mi, target_bin_usec);
  current = circpad_histogram_usec_to_bin(mi, target_bin_usec);

  /* Sanity check the results */
  if (BUG(lower > current) || BUG(higher < current)) {
    return;
  }

  /* Take care of edge cases first */
  if (higher == mi->histogram_len && lower == -1) {
    /* All bins are empty */
    return;
  } else if (higher == mi->histogram_len) {
    /* All higher bins are empty */
    ENSURE_BIN_CAPACITY(lower);
    mi->histogram[lower]--;
    return;
  } else if (lower == -1) {
    /* All lower bins are empty */
    ENSURE_BIN_CAPACITY(higher);
    mi->histogram[higher]--;
    return;
  }

  /* Now handle the intermediate cases */
  if (use_usec) {
    /* Find the closest bin midpoint to the target */
    circpad_delay_t lower_usec = circpad_get_histogram_bin_midpoint(mi, lower);
    circpad_delay_t higher_usec =
      circpad_get_histogram_bin_midpoint(mi, higher);

    if (target_bin_usec < lower_usec) {
      // Lower bin is closer
      ENSURE_BIN_CAPACITY(lower);
      bin_to_remove = lower;
    } else if (target_bin_usec > higher_usec) {
      // Higher bin is closer
      ENSURE_BIN_CAPACITY(higher);
      bin_to_remove = higher;
    } else if (target_bin_usec-lower_usec > higher_usec-target_bin_usec) {
      // Higher bin is closer
      ENSURE_BIN_CAPACITY(higher);
      bin_to_remove = higher;
    } else {
      // Lower bin is closer
      ENSURE_BIN_CAPACITY(lower);
      bin_to_remove = lower;
    }
    mi->histogram[bin_to_remove]--;
    log_debug(LD_CIRC, "Removing token from bin %d", bin_to_remove);
    return;
  } else {
    if (current - lower > higher - current) {
      // Higher bin is closer
      ENSURE_BIN_CAPACITY(higher);
      mi->histogram[higher]--;
      return;
    } else {
      // Lower bin is closer
      ENSURE_BIN_CAPACITY(lower);
      mi->histogram[lower]--;
      return;
    }
  }
}
#undef ENSURE_BIN_CAPACITY

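/*
 * Worked example (illustrative bins): suppose target_bin_usec falls in
 * bin 3, bin 3 is empty, bin 1 is the first lower non-empty bin, and bin 5
 * is the first higher one. With use_usec == 0 the index distances are
 * compared (3 - 1 = 2 vs 5 - 3 = 2; ties go to the lower bin), so bin 1
 * loses a token. With use_usec == 1 the same choice is made by comparing
 * the bins' midpoints in microseconds instead.
 */
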
/**
 * Remove a token from the exact bin corresponding to the target.
 *
 * If it is empty, do nothing.
 *
 * Used for histograms with token removal.
 */
static void
circpad_machine_remove_exact(circpad_machine_runtime_t *mi,
                             circpad_delay_t target_bin_usec)
{
  circpad_hist_index_t bin = circpad_histogram_usec_to_bin(mi,
                                                           target_bin_usec);

  if (mi->histogram[bin] > 0)
    mi->histogram[bin]--;
}

/**
 * Check our state's cell limit count and tokens.
 *
 * Returns 1 if either limit is hit and we decide to change states,
 * otherwise returns 0.
 */
static circpad_decision_t
check_machine_token_supply(circpad_machine_runtime_t *mi)
{
  uint32_t histogram_total_tokens = 0;

  /* Check if the bins are empty. This requires summing up the current mutable
   * machineinfo histogram token total and checking if it is zero.
   * Machineinfo does not keep a running token count. We're assuming the
   * extra space is not worth this short loop iteration.
   *
   * We also do not count the infinity bin in histogram totals. */
  if (circpad_is_token_removal_supported(mi)) {
    for (circpad_hist_index_t b = 0; b < CIRCPAD_INFINITY_BIN(mi); b++)
      histogram_total_tokens += mi->histogram[b];

    /* If we change state, we're done */
    if (histogram_total_tokens == 0) {
      if (circpad_internal_event_bins_empty(mi) == CIRCPAD_STATE_CHANGED)
        return CIRCPAD_STATE_CHANGED;
    }
  }

  if (mi->state_length == 0) {
    return circpad_internal_event_state_length_up(mi);
  }

  return CIRCPAD_STATE_UNCHANGED;
}

/**
 * Count that a padding packet was sent.
 *
 * This updates our state length count, our machine rate limit counts,
 * and if token removal is used, decrements the histogram.
 */
static void
circpad_machine_count_padding_sent(circpad_machine_runtime_t *mi)
{
  /* If we have a valid state length bound, consider it */
  if (mi->state_length != CIRCPAD_STATE_LENGTH_INFINITE &&
      !BUG(mi->state_length <= 0)) {
    mi->state_length--;
  }

  /*
   * Update padding counts for rate limiting: We scale at UINT16_MAX
   * because we only use this for a percentile limit of 2 sig figs, and
   * space is scarce in the machineinfo struct.
   */
  mi->padding_sent++;
  if (mi->padding_sent == UINT16_MAX) {
    mi->padding_sent /= 2;
    mi->nonpadding_sent /= 2;
  }

  circpad_global_padding_sent++;

  /* If we have a mutable histogram, reduce the token count from
   * the chosen padding bin (this assumes we always send padding
   * when we intended to). */
  if (circpad_is_token_removal_supported(mi)) {
    /* Check array bounds and token count before removing */
    if (!BUG(mi->chosen_bin >= mi->histogram_len) &&
        !BUG(mi->histogram[mi->chosen_bin] == 0)) {
      mi->histogram[mi->chosen_bin]--;
    }
  }
}

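/*
 * Worked example for the scaling above (illustrative counts): if
 * padding_sent reaches UINT16_MAX (65535) while nonpadding_sent is 40000,
 * both are halved to 32767 and 20000. The padding-to-total ratio is
 * preserved to the two significant figures the percent limits care about,
 * while both counters stay within their 16-bit fields.
 */
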
/**
 * Count a nonpadding packet as being sent.
 *
 * This function updates our overhead accounting variables, as well
 * as decrements the state limit packet counter, if the latter was
 * flagged as applying to non-padding as well.
 */
static void
circpad_machine_count_nonpadding_sent(circpad_machine_runtime_t *mi)
{
  /* Update non-padding counts for rate limiting: We scale at UINT16_MAX
   * because we only use this for a percentile limit of 2 sig figs, and
   * space is scarce in the machineinfo struct. */
  mi->nonpadding_sent++;
  if (mi->nonpadding_sent == UINT16_MAX) {
    mi->padding_sent /= 2;
    mi->nonpadding_sent /= 2;
  }

  /* Update any state packet length limits that apply */
  circpad_machine_update_state_length_for_nonpadding(mi);

  /* Remove a token from the histogram, if applicable */
  circpad_machine_remove_token(mi);
}

/**
 * Decrement the state length counter for a non-padding packet.
 *
 * Only updates the state length if we're using that feature, we
 * have a state, and the machine wants to count non-padding packets
 * towards the state length.
 */
static inline void
circpad_machine_update_state_length_for_nonpadding(
        circpad_machine_runtime_t *mi)
{
  const circpad_state_t *state = NULL;

  if (mi->state_length == CIRCPAD_STATE_LENGTH_INFINITE)
    return;

  state = circpad_machine_current_state(mi);

  /* If we are not in a padding state (like start or end), we're done */
  if (!state)
    return;

  /* If we're enforcing a state length on non-padding packets,
   * decrement it */
  if (state->length_includes_nonpadding &&
      mi->state_length > 0) {
    mi->state_length--;
  }
}

/**
 * When a non-padding packet arrives, remove a token from the bin
 * corresponding to the delta since the last sent packet. If that bin
 * is empty, choose a token based on the specified removal strategy
 * in the state machine.
 */
STATIC void
circpad_machine_remove_token(circpad_machine_runtime_t *mi)
{
  const circpad_state_t *state = NULL;
  circpad_time_t current_time;
  circpad_delay_t target_bin_usec;

  /* Don't remove any tokens if there was no padding scheduled */
  if (!mi->padding_scheduled_at_usec) {
    return;
  }

  state = circpad_machine_current_state(mi);

  /* If we are not in a padding state (like start or end), we're done */
  if (!state)
    return;
  /* Don't remove any tokens if we're not doing token removal */
  if (state->token_removal == CIRCPAD_TOKEN_REMOVAL_NONE)
    return;

  current_time = monotime_absolute_usec();

  /* If we have scheduled padding some time in the future, we want to see what
     bin we are in at the current time */
  target_bin_usec = (circpad_delay_t)
                  MIN((current_time - mi->padding_scheduled_at_usec),
                      CIRCPAD_DELAY_INFINITE-1);

  /* We are treating this non-padding cell as a padding cell, so we cancel
     the padding timer, if present. */
  mi->padding_scheduled_at_usec = 0;
  if (mi->is_padding_timer_scheduled) {
    mi->is_padding_timer_scheduled = 0;
    timer_disable(mi->padding_timer);
  }

  /* Perform the specified token removal strategy */
  switch (state->token_removal) {
    case CIRCPAD_TOKEN_REMOVAL_CLOSEST_USEC:
      circpad_machine_remove_closest_token(mi, target_bin_usec, 1);
      break;
    case CIRCPAD_TOKEN_REMOVAL_CLOSEST:
      circpad_machine_remove_closest_token(mi, target_bin_usec, 0);
      break;
    case CIRCPAD_TOKEN_REMOVAL_LOWER:
      circpad_machine_remove_lower_token(mi, target_bin_usec);
      break;
    case CIRCPAD_TOKEN_REMOVAL_HIGHER:
      circpad_machine_remove_higher_token(mi, target_bin_usec);
      break;
    case CIRCPAD_TOKEN_REMOVAL_EXACT:
      circpad_machine_remove_exact(mi, target_bin_usec);
      break;
    case CIRCPAD_TOKEN_REMOVAL_NONE:
    default:
      tor_assert_nonfatal_unreached();
      log_warn(LD_BUG, "Circpad: Unknown token removal strategy %d",
               state->token_removal);
      break;
  }
}

/**
 * Send a relay command with a relay cell payload on a circuit to
 * the particular hopnum.
 *
 * Hopnum starts at 1 (1=guard, 2=middle, 3=exit, etc).
 *
 * Payload may be null.
 *
 * Returns negative on error, 0 on success.
 */
MOCK_IMPL(STATIC signed_error_t,
circpad_send_command_to_hop,(origin_circuit_t *circ, uint8_t hopnum,
                             uint8_t relay_command, const uint8_t *payload,
                             ssize_t payload_len))
{
  crypt_path_t *target_hop = circuit_get_cpath_hop(circ, hopnum);
  signed_error_t ret;

  /* Check that the cpath has the target hop */
  if (!target_hop) {
    log_fn(LOG_WARN, LD_BUG, "Padding circuit %u has %d hops, not %d",
           circ->global_identifier, circuit_get_cpath_len(circ), hopnum);
    return -1;
  }

  /* Check that the target hop is opened */
  if (target_hop->state != CPATH_STATE_OPEN) {
    log_fn(LOG_WARN,LD_CIRC,
           "Padding circuit %u has %d hops, not %d",
           circ->global_identifier,
           circuit_get_cpath_opened_len(circ), hopnum);
    return -1;
  }

  /* Send the command to the target hop */
  ret = relay_send_command_from_edge(0, TO_CIRCUIT(circ), relay_command,
                                     (const char*)payload, payload_len,
                                     target_hop);
  return ret;
}

/**
 * Callback helper to send a padding cell.
 *
 * This helper is called after our histogram-sampled delay period passes
 * without another packet being sent first. If a packet is sent before this
 * callback happens, it is canceled. So when we're called here, send padding
 * right away.
 *
 * If sending this padding cell forced us to transition states, return
 * CIRCPAD_STATE_CHANGED. Otherwise return CIRCPAD_STATE_UNCHANGED.
 */
circpad_decision_t
circpad_send_padding_cell_for_callback(circpad_machine_runtime_t *mi)
{
  circuit_t *circ = mi->on_circ;
  int machine_idx = mi->machine_index;
  mi->padding_scheduled_at_usec = 0;
  mi->is_padding_timer_scheduled = 0;
  circpad_statenum_t state = mi->current_state;

  /* Make sure the circuit didn't close on us */
  if (mi->on_circ->marked_for_close) {
    log_fn(LOG_INFO,LD_CIRC,
           "Padding callback on circuit marked for close (%u). Ignoring.",
           CIRCUIT_IS_ORIGIN(mi->on_circ) ?
           TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);
    return CIRCPAD_STATE_CHANGED;
  }

  circpad_machine_count_padding_sent(mi);

  if (CIRCUIT_IS_ORIGIN(mi->on_circ)) {
    circpad_send_command_to_hop(TO_ORIGIN_CIRCUIT(mi->on_circ),
                                CIRCPAD_GET_MACHINE(mi)->target_hopnum,
                                RELAY_COMMAND_DROP, NULL, 0);
    log_info(LD_CIRC, "Callback: Sending padding to origin circuit %u"
             " (%d) [length: %"PRIu64"]",
             TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier,
             mi->on_circ->purpose, mi->state_length);
  } else {
    // If we're a non-origin circ, we can just send from here as if we're the
    // edge.
    if (TO_OR_CIRCUIT(circ)->p_chan_cells.n <= circpad_max_circ_queued_cells) {
      log_info(LD_CIRC, "Callback: Sending padding to circuit (%d)"
               " [length: %"PRIu64"]", mi->on_circ->purpose, mi->state_length);
      relay_send_command_from_edge(0, mi->on_circ, RELAY_COMMAND_DROP, NULL,
                                   0, NULL);
      rep_hist_padding_count_write(PADDING_TYPE_DROP);
    } else {
      static ratelim_t cell_lim = RATELIM_INIT(600);
      log_fn_ratelim(&cell_lim,LOG_NOTICE,LD_CIRC,
                     "Too many cells (%d) in circ queue to send padding.",
                     TO_OR_CIRCUIT(circ)->p_chan_cells.n);
    }
  }

  /* This is a padding cell sent from the client or from the middle node
   * (because it's invoked from circuitpadding.c) */
  circpad_cell_event_padding_sent(circ);

  /* The circpad_cell_event_padding_sent() could cause us to transition.
   * Check that we still have a padding machineinfo, and then check our token
   * supply. */
  if (circ->padding_info[machine_idx] != NULL) {
    if (state != circ->padding_info[machine_idx]->current_state)
      return CIRCPAD_STATE_CHANGED;
    else
      return check_machine_token_supply(circ->padding_info[machine_idx]);
  } else {
    return CIRCPAD_STATE_CHANGED;
  }
}

/**
 * Tor-timer compatible callback that tells us to send a padding cell.
 *
 * Timers are associated with circpad_machine_runtime_t's. When the machineinfo
 * is freed on a circuit, the timers are cancelled. Since the lifetime
 * of the machineinfo is always longer than the timers, handles are not
 * needed.
 */
static void
circpad_send_padding_callback(tor_timer_t *timer, void *args,
                              const struct monotime_t *time)
{
  circpad_machine_runtime_t *mi = ((circpad_machine_runtime_t*)args);
  (void)timer; (void)time;

  if (mi && mi->on_circ) {
    assert_circuit_ok(mi->on_circ);
    circpad_send_padding_cell_for_callback(mi);
  } else {
    // This shouldn't happen (represents a timer leak)
    log_fn(LOG_WARN,LD_CIRC,
           "Circuit closed while waiting for padding timer.");
    tor_fragile_assert();
  }

  // TODO-MP-AP: Unify this counter with channelpadding for rephist stats
  //total_timers_pending--;
}

/**
 * Cache our consensus parameters upon consensus update.
 */
void
circpad_new_consensus_params(const networkstatus_t *ns)
{
  circpad_padding_disabled =
      networkstatus_get_param(ns, "circpad_padding_disabled",
         0, 0, 1);

  circpad_padding_reduced =
      networkstatus_get_param(ns, "circpad_padding_reduced",
         0, 0, 1);

  circpad_global_allowed_cells =
      networkstatus_get_param(ns, "circpad_global_allowed_cells",
         0, 0, UINT16_MAX-1);

  circpad_global_max_padding_percent =
      networkstatus_get_param(ns, "circpad_global_max_padding_pct",
         0, 0, 100);

  circpad_max_circ_queued_cells =
      networkstatus_get_param(ns, "circpad_max_circ_queued_cells",
         CIRCWINDOW_START_MAX, 0, 50*CIRCWINDOW_START_MAX);
}

/**
 * Return true if padding is allowed by torrc and the consensus.
 */
static bool
circpad_is_padding_allowed(void)
{
  /* If padding has been disabled in the consensus, don't send any more
   * padding. Technically the machine should be shut down when the next
   * machine condition check happens, but machine checks only happen on
   * certain circuit events, and if padding is disabled due to some
   * network overload or DoS condition, we really want to stop ASAP. */
  if (circpad_padding_disabled || !get_options()->CircuitPadding) {
    return 0;
  }

  return 1;
}

/**
 * Check this machine against its padding limits, as well as the global
 * consensus limits.
 *
 * We have two limits: a percent and a cell count. The cell count
 * limit must be reached before the percent is enforced (this is to
 * optionally allow very light padding of things like circuit setup
 * while there is no other traffic on the circuit).
 *
 * TODO: Don't apply limits to machines from torrc.
 *
 * Returns 1 if limits are set and we've hit them. Otherwise returns 0.
 */
STATIC bool
circpad_machine_reached_padding_limit(circpad_machine_runtime_t *mi)
{
  const circpad_machine_spec_t *machine = CIRCPAD_GET_MACHINE(mi);

  /* If machine_padding_pct is non-zero, and we've sent more
   * than the allowed count of padding cells, then check our
   * percent limits for this machine. */
  if (machine->max_padding_percent &&
      mi->padding_sent >= machine->allowed_padding_count) {
    uint32_t total_cells = mi->padding_sent + mi->nonpadding_sent;

    /* Check the percent */
    if ((100*(uint32_t)mi->padding_sent) / total_cells >
        machine->max_padding_percent) {
      return 1; // limit is reached. Stop.
    }
  }

  /* If circpad_max_global_padding_pct is non-zero, and we've
   * sent more than the global padding cell limit, then check our
   * global tor process percentage limit on padding. */
  if (circpad_global_max_padding_percent &&
      circpad_global_padding_sent >= circpad_global_allowed_cells) {
    uint64_t total_cells = circpad_global_padding_sent +
              circpad_global_nonpadding_sent;

    /* Check the percent */
    if ((100*circpad_global_padding_sent) / total_cells >
        circpad_global_max_padding_percent) {
      return 1; // global limit reached. Stop.
    }
  }

  return 0; // All good!
}

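/*
 * Worked example (illustrative limits): with allowed_padding_count = 100
 * and max_padding_percent = 50, a machine that has sent 120 padding cells
 * and 100 non-padding cells is checked because 120 >= 100, and
 * (100*120)/220 = 54 > 50, so it must stop. At 120 padding vs 150
 * non-padding cells, (100*120)/270 = 44, which is under the limit, so
 * padding may continue.
 */
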
/**
 * Schedule the next padding time according to the machineinfo on a
 * circuit.
 *
 * The histograms represent inter-packet-delay. Whenever you get a packet
 * event you should be scheduling your next timer (after cancelling any old
 * ones and updating tokens accordingly).
 *
 * Returns 1 if we decide to transition states (due to infinity bin),
 * 0 otherwise.
 */
MOCK_IMPL(circpad_decision_t,
circpad_machine_schedule_padding,(circpad_machine_runtime_t *mi))
{
  circpad_delay_t in_usec = 0;
  struct timeval timeout;
  tor_assert(mi);

  /* Don't schedule padding if it is disabled */
  if (!circpad_is_padding_allowed()) {
    static ratelim_t padding_lim = RATELIM_INIT(600);
    log_fn_ratelim(&padding_lim,LOG_INFO,LD_CIRC,
         "Padding has been disabled, but machine still on circuit %"PRIu64
         ", %d",
         mi->on_circ->n_chan ? mi->on_circ->n_chan->global_identifier : 0,
         mi->on_circ->n_circ_id);

    return CIRCPAD_STATE_UNCHANGED;
  }

  /* Don't schedule padding if we are currently in dormant mode. */
  if (!is_participating_on_network()) {
    log_info(LD_CIRC, "Not scheduling padding because we are dormant.");
    return CIRCPAD_STATE_UNCHANGED;
  }

  // Don't pad in end (but also don't cancel any previously
  // scheduled padding either).
  if (mi->current_state == CIRCPAD_STATE_END) {
    log_fn(LOG_INFO, LD_CIRC, "Padding end state on circuit %u",
           CIRCUIT_IS_ORIGIN(mi->on_circ) ?
           TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);
    return CIRCPAD_STATE_UNCHANGED;
  }

  /* Check our padding limits */
  if (circpad_machine_reached_padding_limit(mi)) {
    if (CIRCUIT_IS_ORIGIN(mi->on_circ)) {
      log_fn(LOG_INFO, LD_CIRC,
             "Padding machine has reached padding limit on circuit %u",
             TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier);
    } else {
      static ratelim_t padding_lim = RATELIM_INIT(600);
      log_fn_ratelim(&padding_lim,LOG_INFO,LD_CIRC,
           "Padding machine has reached padding limit on circuit %"PRIu64
           ", %d",
           mi->on_circ->n_chan ? mi->on_circ->n_chan->global_identifier : 0,
           mi->on_circ->n_circ_id);
    }
    return CIRCPAD_STATE_UNCHANGED;
  }

  if (mi->is_padding_timer_scheduled) {
    /* Cancel current timer (if any) */
    timer_disable(mi->padding_timer);
    mi->is_padding_timer_scheduled = 0;
  }

  /* in_usec = in microseconds */
  in_usec = circpad_machine_sample_delay(mi);
  /* If we're using token removal, we need to know when the padding
   * was scheduled at, so we can remove the appropriate token if
   * a non-padding cell is sent before the padding timer expires.
   *
   * However, since monotime is unpredictably expensive, let's avoid
   * using it for machines that don't need token removal. */
  if (circpad_is_token_removal_supported(mi)) {
    mi->padding_scheduled_at_usec = monotime_absolute_usec();
  } else {
    mi->padding_scheduled_at_usec = 1;
  }
  log_fn(LOG_INFO,LD_CIRC,"\tPadding in %u usec on circuit %u", in_usec,
         CIRCUIT_IS_ORIGIN(mi->on_circ) ?
           TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);

  // Don't schedule if we have infinite delay.
  if (in_usec == CIRCPAD_DELAY_INFINITE) {
    return circpad_internal_event_infinity(mi);
  }

  if (mi->state_length == 0) {
    /* If we're at length 0, that means we hit 0 after sending
     * a cell earlier, and emitted an event for it, but
     * for whatever reason we did not decide to change states then.
     * So maybe the machine is waiting for bins empty, or for an
     * infinity event later? That would be a strange machine,
     * but there's no reason to make it impossible. */
    return CIRCPAD_STATE_UNCHANGED;
  }

  if (in_usec <= 0) {
    return circpad_send_padding_cell_for_callback(mi);
  }

  timeout.tv_sec = in_usec/TOR_USEC_PER_SEC;
  timeout.tv_usec = (in_usec%TOR_USEC_PER_SEC);

  log_fn(LOG_INFO, LD_CIRC, "\tPadding circuit %u in %u sec, %u usec",
         CIRCUIT_IS_ORIGIN(mi->on_circ) ?
           TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0,
         (unsigned)timeout.tv_sec, (unsigned)timeout.tv_usec);

  if (mi->padding_timer) {
    timer_set_cb(mi->padding_timer, circpad_send_padding_callback, mi);
  } else {
    mi->padding_timer =
        timer_new(circpad_send_padding_callback, mi);
  }
  timer_schedule(mi->padding_timer, &timeout);
  mi->is_padding_timer_scheduled = 1;

  // TODO-MP-AP: Unify with channelpadding counter
  //rep_hist_padding_count_timers(++total_timers_pending);

  return CIRCPAD_STATE_UNCHANGED;
}

/**
 * If the machine transitioned to the END state, we need
 * to check to see if it wants us to shut it down immediately.
 * If it does, then we need to send the appropriate negotiation commands
 * depending on which side it is.
 *
 * After this function is called, mi may point to freed memory. Do
 * not access it again.
 */
static void
circpad_machine_spec_transitioned_to_end(circpad_machine_runtime_t *mi)
{
  const circpad_machine_spec_t *machine = CIRCPAD_GET_MACHINE(mi);
  circuit_t *on_circ = mi->on_circ;

  log_fn(LOG_INFO,LD_CIRC, "Padding machine in end state on circuit %u (%d)",
         CIRCUIT_IS_ORIGIN(on_circ) ?
         TO_ORIGIN_CIRCUIT(on_circ)->global_identifier : 0,
         on_circ->purpose);

  /*
   * We allow machines to shut down and delete themselves as opposed
   * to just going back to START or waiting forever in END so that
   * we can handle the case where this machine started while it was
   * the only machine that matched conditions, but *since* then more
   * "higher ranking" machines now match the conditions, and would
   * be given a chance to take precedence over this one in
   * circpad_add_matching_machines().
   *
   * Returning to START or waiting forever in END would not give those
   * other machines a chance to be launched, whereas shutting down
   * here does.
   */
  if (machine->should_negotiate_end) {
    if (machine->is_origin_side) {
      /* We free the machine info here so that we can be replaced
       * by a different machine. But we must leave the padding_machine
       * in place to wait for the negotiated response */
      uint32_t machine_ctr = mi->machine_ctr;
      circpad_circuit_machineinfo_free_idx(on_circ,
                                           machine->machine_index);
      circpad_negotiate_padding(TO_ORIGIN_CIRCUIT(on_circ),
                                machine->machine_num,
                                machine->target_hopnum,
                                CIRCPAD_COMMAND_STOP,
                                machine_ctr);
    } else {
      uint32_t machine_ctr = mi->machine_ctr;
      circpad_circuit_machineinfo_free_idx(on_circ,
                                           machine->machine_index);
      circpad_padding_negotiated(on_circ,
                                 machine->machine_num,
                                 CIRCPAD_COMMAND_STOP,
                                 CIRCPAD_RESPONSE_OK,
                                 machine_ctr);
      on_circ->padding_machine[machine->machine_index] = NULL;
    }
  }
}

/**
 * Generic state transition function for padding state machines.
 *
 * Given an event and our mutable machine info, decide if/how to
 * transition to a different state, and perform actions accordingly.
 *
 * Returns 1 if we transition states, 0 otherwise.
 */
MOCK_IMPL(circpad_decision_t,
circpad_machine_spec_transition,(circpad_machine_runtime_t *mi,
                            circpad_event_t event))
{
  const circpad_state_t *state =
      circpad_machine_current_state(mi);

  /* If state is null we are in the end state. */
  if (!state) {
    /* If we're in the end state, we don't pad, no matter what. */
    return CIRCPAD_STATE_UNCHANGED;
  }

  /* Check if this event is ignored or causes a cancel */
  if (state->next_state[event] == CIRCPAD_STATE_IGNORE) {
    return CIRCPAD_STATE_UNCHANGED;
  } else if (state->next_state[event] == CIRCPAD_STATE_CANCEL) {
    /* Check cancel events and cancel any pending padding */
    mi->padding_scheduled_at_usec = 0;
    if (mi->is_padding_timer_scheduled) {
      mi->is_padding_timer_scheduled = 0;
      /* Cancel current timer (if any) */
      timer_disable(mi->padding_timer);
    }
    return CIRCPAD_STATE_UNCHANGED;
  } else {
    circpad_statenum_t s = state->next_state[event];
    /* See if we need to transition to any other states based on this event.
     * Whenever a transition happens, even to our own state, we schedule
     * padding.
     *
     * So if a state only wants to schedule padding for an event, it specifies
     * a transition to itself. All non-specified events are ignored.
     */
    log_fn(LOG_INFO, LD_CIRC,
           "Circuit %u circpad machine %d transitioning from %u to %u",
           CIRCUIT_IS_ORIGIN(mi->on_circ) ?
           TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0,
           mi->machine_index, mi->current_state, s);

    /* If this is not the same state, switch and init tokens,
     * otherwise just reschedule padding. */
    if (mi->current_state != s) {
      mi->current_state = s;
      circpad_machine_setup_tokens(mi);
      circpad_choose_state_length(mi);

      /* If we transition to the end state, check to see
       * if this machine wants to be shut down at end */
      if (s == CIRCPAD_STATE_END) {
        circpad_machine_spec_transitioned_to_end(mi);
        /* We transitioned but we don't pad in end. Also, mi
         * may be freed. Returning STATE_CHANGED prevents us
         * from accessing it in any callers of this function. */
        return CIRCPAD_STATE_CHANGED;
      }

      /* We transitioned to a new state, schedule padding */
      circpad_machine_schedule_padding(mi);
      return CIRCPAD_STATE_CHANGED;
    }

    /* We transitioned back to the same state. Schedule padding,
     * and inform if that causes a state transition. */
    return circpad_machine_schedule_padding(mi);
  }

  return CIRCPAD_STATE_UNCHANGED;
}

/**
 * Estimate the circuit RTT from the current middle hop out to the
 * end of the circuit.
 *
 * We estimate RTT by calculating the time between "receive" and
 * "send" at a middle hop. This is because we "receive" a cell
 * from the origin, and then relay it towards the exit before a
 * response comes back. It is that response time from the exit side
 * that we want to measure, so that we can make use of it for synthetic
 * response delays.
 */
static void
circpad_estimate_circ_rtt_on_received(circuit_t *circ,
                                      circpad_machine_runtime_t *mi)
{
  /* Origin circuits don't estimate RTT. They could do it easily enough,
   * but they have no reason to use it in any delay calculations. */
  if (CIRCUIT_IS_ORIGIN(circ) || mi->stop_rtt_update)
    return;

  /* If we already have a last received packet time, that means we
   * did not get a response before this packet. The RTT estimate
   * only makes sense if we do not have multiple packets on the
   * wire, so stop estimating if this is the second packet
   * back to back. However, for the first set of back-to-back
   * packets, we can wait until the very first response comes back
   * to us, to measure that RTT (for the response to optimistic
   * data, for example). Hence stop_rtt_update is only checked
   * in this received-side function, and not in the send-side one below.
   */
  if (mi->last_received_time_usec) {
    /* We also allow multiple back-to-back packets if the circuit is not
     * opened, to handle var cells.
     * XXX: Will this work with our var cell plans? Maybe not,
     * since we're opened at the middle hop as soon as we process
     * one var extend2 :/ */
    if (circ->state == CIRCUIT_STATE_OPEN) {
      log_fn(LOG_INFO, LD_CIRC,
             "Stopping padding RTT estimation on circuit (%"PRIu64
             ", %d) after two back to back packets. Current RTT: %d",
             circ->n_chan ? circ->n_chan->global_identifier : 0,
             circ->n_circ_id, mi->rtt_estimate_usec);
      mi->stop_rtt_update = 1;

      if (!mi->rtt_estimate_usec) {
        static ratelim_t rtt_lim = RATELIM_INIT(600);
        log_fn_ratelim(&rtt_lim,LOG_NOTICE,LD_BUG,
          "Circuit got two cells back to back before estimating RTT.");
      }
    }
  } else {
    const circpad_state_t *state = circpad_machine_current_state(mi);
    if (BUG(!state)) {
      return;
    }

    /* Since monotime is unpredictably expensive, only update this field
     * if rtt estimates are needed. Otherwise, stop the rtt update. */
    if (state->use_rtt_estimate) {
      mi->last_received_time_usec = monotime_absolute_usec();
    } else {
      /* Let's fast-path future decisions not to update rtt if the
       * feature is not in use. */
      mi->stop_rtt_update = 1;
    }
  }
}


/**
 * Handles the "send" side of RTT calculation at middle nodes.
 *
 * This function calculates the RTT from the middle to the end
 * of the circuit by subtracting the last received cell timestamp
 * from the current time. It allows back-to-back cells until
 * the circuit is opened, to allow for var cell handshakes.
 * XXX: Check our var cell plans to make sure this will work.
 */
void
circpad_estimate_circ_rtt_on_send(circuit_t *circ,
                                  circpad_machine_runtime_t *mi)
{
  /* Origin circuits don't estimate RTT. They could do it easily enough,
   * but they have no reason to use it in any delay calculations. */
  if (CIRCUIT_IS_ORIGIN(circ))
    return;

  /* If last_received_time_usec is non-zero, we are waiting for a response
   * from the exit side. Calculate the time delta and use it as RTT. */
  if (mi->last_received_time_usec) {
    circpad_time_t rtt_time = monotime_absolute_usec() -
        mi->last_received_time_usec;

    /* Reset the last RTT packet time, so we can tell if two cells
     * arrive back to back */
    mi->last_received_time_usec = 0;

    /* Use INT32_MAX to ensure the addition doesn't overflow */
    if (rtt_time >= INT32_MAX) {
      log_fn(LOG_WARN, LD_CIRC,
             "Circuit padding RTT estimate overflowed: %"PRIu64
             " vs %"PRIu64, monotime_absolute_usec(),
             mi->last_received_time_usec);
      return;
    }

    /* If the old RTT estimate is lower than this one, use this one, because
     * the circuit is getting longer. If this estimate is somehow
     * faster than the previous, then maybe that was network jitter, or a
     * bad monotonic clock source (so our ratchet returned a zero delta).
     * In that case, average them. */
    if (mi->rtt_estimate_usec < (circpad_delay_t)rtt_time) {
      mi->rtt_estimate_usec = (circpad_delay_t)rtt_time;
    } else {
      mi->rtt_estimate_usec += (circpad_delay_t)rtt_time;
      mi->rtt_estimate_usec /= 2;
    }
  } else if (circ->state == CIRCUIT_STATE_OPEN) {
    /* If last_received_time_usec is zero, then we have gotten two cells back
     * to back. Stop estimating RTT in this case. Note that we only
     * stop RTT update if the circuit is opened, to allow for RTT estimates
     * of var cells during circ setup. */
    if (!mi->rtt_estimate_usec && !mi->stop_rtt_update) {
      static ratelim_t rtt_lim = RATELIM_INIT(600);
      log_fn_ratelim(&rtt_lim, LOG_NOTICE, LD_BUG,
        "Circuit sent two cells back to back before estimating RTT.");
    }
    mi->stop_rtt_update = 1;
  }
}
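
/* Worked example of the update rule above (illustrative numbers, not taken
 * from any measurement): if rtt_estimate_usec is currently 40000 usec and a
 * new rtt_time of 60000 usec arrives, the estimate ratchets up to 60000,
 * since the path can only have gotten longer. If instead a new rtt_time of
 * 20000 usec arrives (jitter, or a zero-delta monotonic ratchet), the two
 * are averaged: (40000 + 20000) / 2 == 30000 usec. */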
1810 * A "non-padding" cell has been sent from this endpoint. React
1811 * according to any padding state machines on the circuit.
1813 * For origin circuits, this means we sent a cell into the network.
1814 * For middle relay circuits, this means we sent a cell towards the
1818 circpad_cell_event_nonpadding_sent(circuit_t
*on_circ
)
1820 /* Update global cell count */
1821 circpad_global_nonpadding_sent
++;
1823 /* If there are no machines then this loop should not iterate */
1824 FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i
, on_circ
) {
1825 /* First, update any timestamps */
1826 on_circ
->padding_info
[i
]->last_cell_time_sec
= approx_time();
1827 circpad_estimate_circ_rtt_on_send(on_circ
, on_circ
->padding_info
[i
]);
1829 /* Then, do accounting */
1830 circpad_machine_count_nonpadding_sent(on_circ
->padding_info
[i
]);
1832 /* Check to see if we've run out of tokens for this state already,
1833 * and if not, check for other state transitions */
1834 if (check_machine_token_supply(on_circ
->padding_info
[i
])
1835 == CIRCPAD_STATE_UNCHANGED
) {
1836 /* If removing a token did not cause a transition, check if
1837 * non-padding sent event should */
1838 circpad_machine_spec_transition(on_circ
->padding_info
[i
],
1839 CIRCPAD_EVENT_NONPADDING_SENT
);
1841 } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END
;

/** Check if this cell or circuit are related to circuit padding and handle
 * them if so. Return 0 if the cell was handled in this subsystem and does
 * not need any other consideration, otherwise return 1.
 */
int
circpad_check_received_cell(cell_t *cell, circuit_t *circ,
                            crypt_path_t *layer_hint,
                            const relay_header_t *rh)
{
  /* First handle the padding commands, since we want to ignore any other
   * commands if this circuit is padding-specific. */
  switch (rh->command) {
    case RELAY_COMMAND_DROP:
      /* Already examined in circpad_deliver_recognized_relay_cell_events */
      return 0;
    case RELAY_COMMAND_PADDING_NEGOTIATE:
      circpad_handle_padding_negotiate(circ, cell);
      return 0;
    case RELAY_COMMAND_PADDING_NEGOTIATED:
      if (circpad_handle_padding_negotiated(circ, cell, layer_hint) == 0)
        circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), rh->length);
      return 0;
  }

  /* If this is a padding circuit we don't need to parse any other commands
   * than the padding ones. Just drop them to the floor.
   *
   * Note: we deliberately do not call circuit_read_valid_data() here. The
   * vanguards addon (specifically the 'bandguards' component's dropped cell
   * detection) will thus close this circuit, as it would for any other
   * unexpected cell. However, default tor will *not* close the circuit.
   *
   * This is intentional. We are not yet certain that it is optimal to keep
   * padding circuits open in cases like these, rather than closing them.
   * We suspect that continuing to pad is optimal against a passive
   * classifier, but as soon as the adversary is active (even as a client
   * adversary) this may no longer hold.
   *
   * So as a way forward, we log the cell command and circuit number, to
   * help us enumerate the most common instances of this in testing with
   * vanguards, to see which are common enough to verify and handle
   * properly. */
  if (circ->purpose == CIRCUIT_PURPOSE_C_CIRCUIT_PADDING) {
    log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
           "Ignored cell (%d) that arrived in padding circuit %u.",
           rh->command, CIRCUIT_IS_ORIGIN(circ) ?
           TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
    return 0;
  }

  return 1;
}
1900 * A "non-padding" cell has been received by this endpoint. React
1901 * according to any padding state machines on the circuit.
1903 * For origin circuits, this means we read a cell from the network.
1904 * For middle relay circuits, this means we received a cell from the
1908 circpad_cell_event_nonpadding_received(circuit_t
*on_circ
)
1910 FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i
, on_circ
) {
1911 /* First, update any timestamps */
1912 on_circ
->padding_info
[i
]->last_cell_time_sec
= approx_time();
1913 circpad_estimate_circ_rtt_on_received(on_circ
, on_circ
->padding_info
[i
]);
1915 circpad_machine_spec_transition(on_circ
->padding_info
[i
],
1916 CIRCPAD_EVENT_NONPADDING_RECV
);
1917 } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END
;

/**
 * A padding cell has been sent from this endpoint. React
 * according to any padding state machines on the circuit.
 *
 * For origin circuits, this means we sent a cell into the network.
 * For middle relay circuits, this means we sent a cell towards the
 * origin.
 */
void
circpad_cell_event_padding_sent(circuit_t *on_circ)
{
  FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, on_circ) {
    /* Check to see if we've run out of tokens for this state already,
     * and if not, check for other state transitions */
    if (check_machine_token_supply(on_circ->padding_info[i])
        == CIRCPAD_STATE_UNCHANGED) {
      /* If removing a token did not cause a transition, check if
       * the padding sent event should */
      on_circ->padding_info[i]->last_cell_time_sec = approx_time();
      circpad_machine_spec_transition(on_circ->padding_info[i],
                                      CIRCPAD_EVENT_PADDING_SENT);
    }
  } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
}

/**
 * A padding cell has been received by this endpoint. React
 * according to any padding state machines on the circuit.
 *
 * For origin circuits, this means we read a cell from the network.
 * For middle relay circuits, this means we received a cell from the
 * origin.
 */
void
circpad_cell_event_padding_received(circuit_t *on_circ)
{
  /* identical to padding sent */
  FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, on_circ) {
    on_circ->padding_info[i]->last_cell_time_sec = approx_time();
    circpad_machine_spec_transition(on_circ->padding_info[i],
                                    CIRCPAD_EVENT_PADDING_RECV);
  } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
}
1966 * An "infinite" delay has ben chosen from one of our histograms.
1968 * "Infinite" delays mean don't send padding -- but they can also
1969 * mean transition to another state depending on the state machine
1970 * definitions. Check the rules and react accordingly.
1972 * Return 1 if we decide to transition, 0 otherwise.
1975 circpad_internal_event_infinity(circpad_machine_runtime_t
*mi
)
1977 return circpad_machine_spec_transition(mi
, CIRCPAD_EVENT_INFINITY
);

/**
 * All of the bins of our current state's histogram are empty.
 *
 * Check to see if this means transition to another state, and if
 * not, refill the tokens.
 *
 * Return 1 if we decide to transition, 0 otherwise.
 */
circpad_decision_t
circpad_internal_event_bins_empty(circpad_machine_runtime_t *mi)
{
  if (circpad_machine_spec_transition(mi, CIRCPAD_EVENT_BINS_EMPTY)
      == CIRCPAD_STATE_CHANGED) {
    return CIRCPAD_STATE_CHANGED;
  } else {
    /* If we don't transition, then we refill the tokens */
    circpad_machine_setup_tokens(mi);
    return CIRCPAD_STATE_UNCHANGED;
  }
}

/**
 * This state has used up its cell count. Emit the event and
 * see if we transition.
 *
 * Return 1 if we decide to transition, 0 otherwise.
 */
circpad_decision_t
circpad_internal_event_state_length_up(circpad_machine_runtime_t *mi)
{
  return circpad_machine_spec_transition(mi, CIRCPAD_EVENT_LENGTH_COUNT);
}

/**
 * Returns true if the circuit matches the conditions.
 */
static inline bool
circpad_machine_conditions_apply(origin_circuit_t *circ,
                                 const circpad_machine_spec_t *machine)
{
  /* If padding is disabled, no machines should match/apply. This has
   * the effect of shutting down all machines, and not adding any more. */
  if (circpad_padding_disabled || !get_options()->CircuitPadding)
    return 0;

  /* If the consensus or our torrc has selected reduced connection padding,
   * then only allow this machine if it is flagged as acceptable under
   * reduced padding conditions */
  if (circpad_padding_reduced || get_options()->ReducedCircuitPadding) {
    if (!machine->conditions.reduced_padding_ok)
      return 0;
  }

  if (!(circpad_circ_purpose_to_mask(TO_CIRCUIT(circ)->purpose)
      & machine->conditions.apply_purpose_mask))
    return 0;

  if (machine->conditions.requires_vanguards) {
    const or_options_t *options = get_options();

    /* Pinned middles are effectively vanguards */
    if (!(options->HSLayer2Nodes || options->HSLayer3Nodes))
      return 0;
  }

  /* We check for any bits set in the circuit state mask so that machines
   * can say any of the following through their state bitmask:
   * "I want to apply to circuits with either streams or no streams"; OR
   * "I only want to apply to circuits with streams"; OR
   * "I only want to apply to circuits without streams". */
  if (!(circpad_circuit_state(circ) & machine->conditions.apply_state_mask))
    return 0;

  if (circuit_get_cpath_opened_len(circ) < machine->conditions.min_hops)
    return 0;

  return 1;
}

/**
 * Check to see if any of the keep conditions still apply to this circuit.
 *
 * These conditions keep the machines active if they match, but do not
 * cause new machines to start up.
 */
static inline bool
circpad_machine_conditions_keep(origin_circuit_t *circ,
                                const circpad_machine_spec_t *machine)
{
  if ((circpad_circ_purpose_to_mask(TO_CIRCUIT(circ)->purpose)
      & machine->conditions.keep_purpose_mask))
    return 1;

  if ((circpad_circuit_state(circ) & machine->conditions.keep_state_mask))
    return 1;

  return 0;
}

/**
 * Returns a minimized representation of the circuit state.
 *
 * The padding code only cares if the circuit is building,
 * opened, used for streams, and/or still has relay early cells.
 * This returns a bitmask of all state properties that apply to
 * this circuit.
 */
static inline circpad_circuit_state_t
circpad_circuit_state(origin_circuit_t *circ)
{
  circpad_circuit_state_t retmask = 0;

  if (circ->p_streams)
    retmask |= CIRCPAD_CIRC_STREAMS;
  else
    retmask |= CIRCPAD_CIRC_NO_STREAMS;

  /* We use has_opened to prevent cannibalized circs from flapping. */
  if (circ->has_opened)
    retmask |= CIRCPAD_CIRC_OPENED;
  else
    retmask |= CIRCPAD_CIRC_BUILDING;

  if (circ->remaining_relay_early_cells > 0)
    retmask |= CIRCPAD_CIRC_HAS_RELAY_EARLY;
  else
    retmask |= CIRCPAD_CIRC_HAS_NO_RELAY_EARLY;

  return retmask;
}

/**
 * Convert a normal circuit purpose into a bitmask that we can
 * use for determining matching circuits.
 */
circpad_purpose_mask_t
circpad_circ_purpose_to_mask(uint8_t circ_purpose)
{
  /* Treat OR circ purposes as ignored. They should not be passed here. */
  if (BUG(circ_purpose <= CIRCUIT_PURPOSE_OR_MAX_)) {
    return 0;
  }

  /* Treat new client circuit purposes as "OMG ITS EVERYTHING".
   * This also should not happen */
  if (BUG(circ_purpose - CIRCUIT_PURPOSE_OR_MAX_ - 1 > 32)) {
    return CIRCPAD_PURPOSE_ALL;
  }

  /* Convert the purpose to a bit position */
  return 1 << (circ_purpose - CIRCUIT_PURPOSE_OR_MAX_ - 1);
}
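
/* Illustrative example of the conversion above (the concrete constant
 * values here are assumptions for the sake of the example, not taken from
 * or.h): if CIRCUIT_PURPOSE_OR_MAX_ were 4, then a client purpose of 5 maps
 * to bit 0 (mask 0x1) and a purpose of 6 maps to bit 1 (mask 0x2), so a
 * machine whose apply_purpose_mask is (0x1|0x2) matches exactly those two
 * purposes, while CIRCPAD_PURPOSE_ALL matches every client purpose. */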

/**
 * Shut down any machines whose conditions no longer match
 * the current circuit.
 */
static void
circpad_shutdown_old_machines(origin_circuit_t *on_circ)
{
  circuit_t *circ = TO_CIRCUIT(on_circ);

  FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, circ) {
    /* We shut down a machine if neither the apply conditions
     * nor the keep conditions match. If either set of conditions match,
     * keep it around. */
    if (!circpad_machine_conditions_apply(on_circ,
                                          circ->padding_machine[i]) &&
        !circpad_machine_conditions_keep(on_circ,
                                         circ->padding_machine[i])) {
      uint32_t machine_ctr = circ->padding_info[i]->machine_ctr;
      // Clear machineinfo (frees timers)
      circpad_circuit_machineinfo_free_idx(circ, i);
      // Send padding negotiate stop
      circpad_negotiate_padding(on_circ,
                                circ->padding_machine[i]->machine_num,
                                circ->padding_machine[i]->target_hopnum,
                                CIRCPAD_COMMAND_STOP,
                                machine_ctr);
    }
  } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
}

/**
 * Negotiate new machines that would apply to this circuit, given the machines
 * inside <b>machines_sl</b>.
 *
 * This function checks to see if we have any free machine indexes,
 * and for each free machine index, it initializes the most recently
 * added origin-side padding machine that matches the target machine
 * index and circuit conditions, and negotiates it with the appropriate
 * middle relay.
 */
STATIC void
circpad_add_matching_machines(origin_circuit_t *on_circ,
                              smartlist_t *machines_sl)
{
  circuit_t *circ = TO_CIRCUIT(on_circ);

#ifdef TOR_UNIT_TESTS
  /* Tests don't have to init our padding machines */
  if (!machines_sl)
    return;
#endif

  /* If padding negotiation failed before, do not try again */
  if (on_circ->padding_negotiation_failed)
    return;

  FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) {
    /* If there is a padding machine info, this index is occupied.
     * No need to check conditions for this index. */
    if (circ->padding_info[i])
      continue;

    /* We have a free machine index. Check the origin padding
     * machines in reverse order, so that more recently added
     * machines take priority over older ones. */
    SMARTLIST_FOREACH_REVERSE_BEGIN(machines_sl,
                                    circpad_machine_spec_t *,
                                    machine) {
      /* Machine definitions have a specific target machine index.
       * This is so event ordering is deterministic with respect
       * to which machine gets events first when there are two
       * machines installed on a circuit. Make sure we only
       * add this machine if its target machine index is free. */
      if (machine->machine_index == i &&
          circpad_machine_conditions_apply(on_circ, machine)) {

        // We can only replace this machine if the target hopnum
        // is the same, otherwise we'll get invalid data
        if (circ->padding_machine[i]) {
          if (circ->padding_machine[i]->target_hopnum !=
              machine->target_hopnum)
            continue;
          /* Replace it. (Don't free - is global). */
          circ->padding_machine[i] = NULL;
        }

        /* Set up the machine immediately so that the slot is occupied.
         * We will tear it down on error return, or if there is an error
         * response from the relay. */
        circpad_setup_machine_on_circ(circ, machine);
        if (circpad_negotiate_padding(on_circ, machine->machine_num,
                                      machine->target_hopnum,
                                      CIRCPAD_COMMAND_START,
                                      circ->padding_machine_ctr) < 0) {
          log_info(LD_CIRC,
                   "Padding not negotiated. Cleaning machine from circuit %u",
                   CIRCUIT_IS_ORIGIN(circ) ?
                   TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
          circpad_circuit_machineinfo_free_idx(circ, i);
          circ->padding_machine[i] = NULL;
          on_circ->padding_negotiation_failed = 1;
        } else {
          /* Success. Don't try any more machines on this index */
          break;
        }
      }
    } SMARTLIST_FOREACH_END(machine);
  } FOR_EACH_CIRCUIT_MACHINE_END;
}

/**
 * Event that tells us we added a hop to an origin circuit.
 *
 * This event is used to decide if we should create a padding machine
 * on the circuit.
 */
void
circpad_machine_event_circ_added_hop(origin_circuit_t *on_circ)
{
  /* Since our padding conditions do not specify a max_hops,
   * all we can do is add machines here */
  circpad_add_matching_machines(on_circ, origin_padding_machines);
}

/**
 * Event that tells us that an origin circuit is now built.
 *
 * Shut down any machines that only applied to un-built circuits.
 * Activate any new ones.
 */
void
circpad_machine_event_circ_built(origin_circuit_t *circ)
{
  circpad_shutdown_old_machines(circ);
  circpad_add_matching_machines(circ, origin_padding_machines);
}

/**
 * Circpad purpose changed event.
 *
 * Shut down any machines that don't apply to our circ purpose.
 * Activate any new ones that do.
 */
void
circpad_machine_event_circ_purpose_changed(origin_circuit_t *circ)
{
  circpad_shutdown_old_machines(circ);
  circpad_add_matching_machines(circ, origin_padding_machines);
}

/**
 * Event that tells us that an origin circuit is out of RELAY_EARLY
 * cells.
 *
 * Shut down any machines that only applied to RELAY_EARLY circuits.
 * Activate any new ones.
 */
void
circpad_machine_event_circ_has_no_relay_early(origin_circuit_t *circ)
{
  circpad_shutdown_old_machines(circ);
  circpad_add_matching_machines(circ, origin_padding_machines);
}

/**
 * Streams attached event.
 *
 * Called from link_apconn_to_circ() and handle_hs_exit_conn()
 *
 * Shut down any machines that only applied to circuits without
 * streams. Activate any new ones.
 */
void
circpad_machine_event_circ_has_streams(origin_circuit_t *circ)
{
  circpad_shutdown_old_machines(circ);
  circpad_add_matching_machines(circ, origin_padding_machines);
}

/**
 * Streams detached event.
 *
 * Called from circuit_detach_stream()
 *
 * Shut down any machines that only applied to circuits with
 * streams. Activate any new ones.
 */
void
circpad_machine_event_circ_has_no_streams(origin_circuit_t *circ)
{
  circpad_shutdown_old_machines(circ);
  circpad_add_matching_machines(circ, origin_padding_machines);
}

/**
 * Verify that padding is coming from the expected hop.
 *
 * Returns true if from_hop matches the target hop from
 * one of our padding machines.
 *
 * Returns false if we're not an origin circuit, or if from_hop
 * does not match one of the padding machines.
 */
bool
circpad_padding_is_from_expected_hop(circuit_t *circ,
                                     crypt_path_t *from_hop)
{
  crypt_path_t *target_hop = NULL;
  if (!CIRCUIT_IS_ORIGIN(circ))
    return 0;

  FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) {
    /* We have to check padding_machine and not padding_info/active
     * machines here because padding may arrive after we shut down a
     * machine. The info is gone, but the padding_machine waits
     * for the padding_negotiated response to come back. */
    if (!circ->padding_machine[i])
      continue;

    target_hop = circuit_get_cpath_hop(TO_ORIGIN_CIRCUIT(circ),
                          circ->padding_machine[i]->target_hopnum);

    if (target_hop == from_hop)
      return 1;
  } FOR_EACH_CIRCUIT_MACHINE_END;

  return 0;
}

/**
 * Deliver circpad events for an "unrecognized cell".
 *
 * Unrecognized cells are sent to relays and are forwarded
 * onto the next hop of their circuits. Unrecognized cells
 * are by definition not padding. We need to tell relay-side
 * state machines that a non-padding cell was sent or received,
 * depending on the direction, so they can update their histograms
 * and decide to pad or not.
 */
void
circpad_deliver_unrecognized_cell_events(circuit_t *circ,
                                         cell_direction_t dir)
{
  // We should never see unrecognized cells at origin.
  // Our caller emits a warn when this happens.
  if (CIRCUIT_IS_ORIGIN(circ)) {
    return;
  }

  if (dir == CELL_DIRECTION_OUT) {
    /* When direction is out (away from origin), then we received a
       non-padding cell coming from the origin to us. */
    circpad_cell_event_nonpadding_received(circ);
  } else if (dir == CELL_DIRECTION_IN) {
    /* It's in and not origin, so the cell is going away from us.
     * So we are relaying a non-padding cell towards the origin. */
    circpad_cell_event_nonpadding_sent(circ);
  }
}

/**
 * Deliver circpad events for "recognized" relay cells.
 *
 * Recognized cells are destined for this hop, either client or middle.
 * Check if this is a padding cell or not, and send the appropriate
 * events.
 */
void
circpad_deliver_recognized_relay_cell_events(circuit_t *circ,
                                             uint8_t relay_command,
                                             crypt_path_t *layer_hint)
{
  if (relay_command == RELAY_COMMAND_DROP) {
    rep_hist_padding_count_read(PADDING_TYPE_DROP);

    if (CIRCUIT_IS_ORIGIN(circ)) {
      if (circpad_padding_is_from_expected_hop(circ, layer_hint)) {
        circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), 0);
      } else {
        /* This is unexpected padding. Ignore it for now. */
        return;
      }
    }

    /* The cell should be recognized by now, which means that we are on the
       destination, which means that we received a padding cell. We might be
       the client or the middle node, still, because of leaky-pipe. */
    circpad_cell_event_padding_received(circ);
    log_fn(LOG_INFO, LD_CIRC, "Got padding cell on %s circuit %u.",
           CIRCUIT_IS_ORIGIN(circ) ? "origin" : "non-origin",
           CIRCUIT_IS_ORIGIN(circ) ?
           TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
  } else {
    /* We received a non-padding cell on the edge */
    circpad_cell_event_nonpadding_received(circ);
  }
}

/**
 * Deliver circpad events for relay cells sent from us.
 *
 * If this is a padding cell, update our padding stats
 * and deliver the event. Otherwise just deliver the event.
 */
void
circpad_deliver_sent_relay_cell_events(circuit_t *circ,
                                       uint8_t relay_command)
{
  /* RELAY_COMMAND_DROP is the multi-hop (aka circuit-level) padding cell in
   * tor. (CELL_PADDING is a channel-level padding cell, which is not relayed
   * or processed here).
   *
   * We do generate events for PADDING_NEGOTIATE and PADDING_NEGOTIATED cells.
   */
  if (relay_command == RELAY_COMMAND_DROP) {
    /* Optimization: The event for RELAY_COMMAND_DROP is sent directly
     * from circpad_send_padding_cell_for_callback(). This is to avoid
     * putting a cell_t and a relay_header_t on the stack repeatedly
     * if we decide to send a long train of padding cells back-to-back
     * with 0 delay. So we do nothing here. */
    return;
  } else {
    /* This is a non-padding cell sent from the client or from
     * this node. */
    circpad_cell_event_nonpadding_sent(circ);
  }
}

/**
 * Initialize the states array for a circpad machine.
 */
void
circpad_machine_states_init(circpad_machine_spec_t *machine,
                            circpad_statenum_t num_states)
{
  if (BUG(num_states > CIRCPAD_MAX_MACHINE_STATES)) {
    num_states = CIRCPAD_MAX_MACHINE_STATES;
  }

  machine->num_states = num_states;
  machine->states = tor_malloc_zero(sizeof(circpad_state_t)*num_states);

  /* Initialize the default next state for all events to
   * "ignore" -- if events aren't specified, they are ignored. */
  for (circpad_statenum_t s = 0; s < num_states; s++) {
    for (int e = 0; e < CIRCPAD_NUM_EVENTS; e++) {
      machine->states[s].next_state[e] = CIRCPAD_STATE_IGNORE;
    }
  }
}
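
/* Minimal usage sketch for the helper above (illustrative only; it mirrors
 * the unit-test machines defined further down in this file). A caller
 * allocates a spec, sizes its states array, and then wires up only the
 * transitions it cares about -- every other event stays at
 * CIRCPAD_STATE_IGNORE:
 *
 *   circpad_machine_spec_t *m = tor_malloc_zero(sizeof(*m));
 *   circpad_machine_states_init(m, 2);
 *   m->states[CIRCPAD_STATE_START].
 *     next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;
 *   m->states[CIRCPAD_STATE_BURST].
 *     next_state[CIRCPAD_EVENT_BINS_EMPTY] = CIRCPAD_STATE_END;
 */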

/**
 * Set up a padding machine on this circuit at the machine's target index.
 */
static void
circpad_setup_machine_on_circ(circuit_t *on_circ,
                              const circpad_machine_spec_t *machine)
{
  if (CIRCUIT_IS_ORIGIN(on_circ) && !machine->is_origin_side) {
    log_fn(LOG_WARN, LD_BUG,
           "Can't set up non-origin machine on origin circuit!");
    return;
  }

  if (!CIRCUIT_IS_ORIGIN(on_circ) && machine->is_origin_side) {
    log_fn(LOG_WARN, LD_BUG,
           "Can't set up origin machine on non-origin circuit!");
    return;
  }

  IF_BUG_ONCE(on_circ->padding_machine[machine->machine_index] != NULL) {
    return;
  }
  IF_BUG_ONCE(on_circ->padding_info[machine->machine_index] != NULL) {
    return;
  }

  if (CIRCUIT_IS_ORIGIN(on_circ)) {
    log_info(LD_CIRC, "Registering machine %s to origin circ %u (%d)",
             machine->name,
             TO_ORIGIN_CIRCUIT(on_circ)->global_identifier, on_circ->purpose);
  } else {
    log_info(LD_CIRC, "Registering machine %s to non-origin circ (%d)",
             machine->name, on_circ->purpose);
  }

  /* Padding machine ctr starts at 1, so we increment this ctr first.
   * (machine ctr of 0 means "any machine").
   *
   * See https://bugs.torproject.org/30992. */
  on_circ->padding_machine_ctr++;

  /* uint32 wraparound check: 0 is special, just wrap to 1 */
  if (on_circ->padding_machine_ctr == 0) {
    on_circ->padding_machine_ctr = 1;
  }

  on_circ->padding_info[machine->machine_index] =
      circpad_circuit_machineinfo_new(on_circ, machine->machine_index);
  on_circ->padding_machine[machine->machine_index] = machine;
}

/** Validate a single state of a padding machine */
static bool
padding_machine_state_is_valid(const circpad_state_t *state)
{
  int b;
  uint32_t tokens_count = 0;
  circpad_delay_t prev_bin_edge = 0;

  /* We only validate histograms */
  if (!state->histogram_len) {
    return true;
  }

  /* We need at least two bins in a histogram */
  if (state->histogram_len < 2) {
    log_warn(LD_CIRC, "You can't have a histogram with less than 2 bins");
    return false;
  }

  /* For each machine state, if it's a histogram, make sure all the
   * histogram edges are well defined (i.e. are strictly monotonic). */
  for (b = 0 ; b < state->histogram_len ; b++) {
    /* Check that histogram edges are strictly increasing. Ignore the first
     * edge since it can be zero. */
    if (prev_bin_edge >= state->histogram_edges[b] && b > 0) {
      log_warn(LD_CIRC, "Histogram edges are not increasing [%u/%u]",
               prev_bin_edge, state->histogram_edges[b]);
      return false;
    }

    prev_bin_edge = state->histogram_edges[b];

    /* Also count the number of tokens as we go through the histogram states */
    tokens_count += state->histogram[b];
  }

  /* Verify that the total number of tokens is correct */
  if (tokens_count != state->histogram_total_tokens) {
    log_warn(LD_CIRC, "Histogram token count is wrong [%u/%u]",
             tokens_count, state->histogram_total_tokens);
    return false;
  }

  return true;
}
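
/* Example of a histogram that passes the checks above (it has the same
 * shape as the test client machine defined later in this file):
 * histogram_len == 2, histogram_edges == {500, 1000000},
 * histogram == {5, 0}, histogram_total_tokens == 5. The edges strictly
 * increase after the first bin, and the per-bin tokens sum to the declared
 * total. */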

/** Basic validation of padding machine */
static bool
padding_machine_is_valid(const circpad_machine_spec_t *machine)
{
  int i;

  /* Validate the histograms of the padding machine */
  for (i = 0 ; i < machine->num_states ; i++) {
    if (!padding_machine_state_is_valid(&machine->states[i])) {
      return false;
    }
  }

  return true;
}

/* Validate and register <b>machine</b> into <b>machine_list</b>. If
 * <b>machine_list</b> is NULL, then just validate. */
void
circpad_register_padding_machine(circpad_machine_spec_t *machine,
                                 smartlist_t *machine_list)
{
  if (!padding_machine_is_valid(machine)) {
    log_warn(LD_CIRC, "Machine #%u is invalid. Ignoring.",
             machine->machine_num);
    return;
  }

  if (machine_list) {
    smartlist_add(machine_list, machine);
  }
}

#ifdef TOR_UNIT_TESTS
/* These padding machines are only used for tests pending #28634. */
static void
circpad_circ_client_machine_init(void)
{
  circpad_machine_spec_t *circ_client_machine
      = tor_malloc_zero(sizeof(circpad_machine_spec_t));

  circ_client_machine->conditions.min_hops = 2;
  circ_client_machine->conditions.apply_state_mask =
      CIRCPAD_CIRC_BUILDING|CIRCPAD_CIRC_OPENED|CIRCPAD_CIRC_HAS_RELAY_EARLY;
  circ_client_machine->conditions.apply_purpose_mask = CIRCPAD_PURPOSE_ALL;
  circ_client_machine->conditions.reduced_padding_ok = 1;

  circ_client_machine->target_hopnum = 2;
  circ_client_machine->is_origin_side = 1;

  /* Start, gap, burst */
  circpad_machine_states_init(circ_client_machine, 3);

  circ_client_machine->states[CIRCPAD_STATE_START].
      next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;

  circ_client_machine->states[CIRCPAD_STATE_BURST].
      next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;
  circ_client_machine->states[CIRCPAD_STATE_BURST].
      next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_BURST;

  /* If we are in burst state, and we send a non-padding cell, then we cancel
     the timer for the next padding cell:
     we don't want to send fake extends when actual extends are going on */
  circ_client_machine->states[CIRCPAD_STATE_BURST].
      next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_CANCEL;

  circ_client_machine->states[CIRCPAD_STATE_BURST].
      next_state[CIRCPAD_EVENT_BINS_EMPTY] = CIRCPAD_STATE_END;

  circ_client_machine->states[CIRCPAD_STATE_BURST].token_removal =
      CIRCPAD_TOKEN_REMOVAL_CLOSEST;

  circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_len = 2;
  circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_edges[0] = 500;
  circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_edges[1] = 1000000;

  /* We have 5 tokens in the histogram, which means that all circuits will look
   * like they have 7 hops (since we start this machine after the second hop,
   * and tokens are decremented for any valid hops, and fake extends are
   * used after that -- 2+5==7). */
  circ_client_machine->states[CIRCPAD_STATE_BURST].histogram[0] = 5;

  circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_total_tokens = 5;

  circ_client_machine->machine_num = smartlist_len(origin_padding_machines);
  circpad_register_padding_machine(circ_client_machine,
                                   origin_padding_machines);
}
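
/* Summary of the test client machine wired up above (informational, derived
 * from the assignments directly above):
 *
 *   START --NONPADDING_RECV--> BURST
 *   BURST --NONPADDING_RECV / PADDING_RECV--> BURST (schedule more padding)
 *   BURST --NONPADDING_SENT--> CANCEL (a real EXTEND is in flight;
 *                                      cancel the pending padding timer)
 *   BURST --BINS_EMPTY--> END (all 5 tokens have been spent)
 */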

static void
circpad_circ_responder_machine_init(void)
{
  circpad_machine_spec_t *circ_responder_machine
      = tor_malloc_zero(sizeof(circpad_machine_spec_t));

  /* Shut down the machine after we've sent enough packets */
  circ_responder_machine->should_negotiate_end = 1;

  /* The relay-side doesn't care what hopnum it is, but for consistency,
   * let's match the client */
  circ_responder_machine->target_hopnum = 2;
  circ_responder_machine->is_origin_side = 0;

  /* Start, gap, burst */
  circpad_machine_states_init(circ_responder_machine, 3);

  /* These are the settings of the state machine. In the future we are going
     to serialize this into the consensus or the torrc */

  /* We transition to the burst state on padding receive and on non-padding
   * receive */
  circ_responder_machine->states[CIRCPAD_STATE_START].
      next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_BURST;
  circ_responder_machine->states[CIRCPAD_STATE_START].
      next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;

  /* Inside the burst state we _stay_ in the burst state when a non-padding
   * cell is sent */
  circ_responder_machine->states[CIRCPAD_STATE_BURST].
      next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_BURST;

  /* Inside the burst state we transition to the gap state when we receive a
   * padding cell */
  circ_responder_machine->states[CIRCPAD_STATE_BURST].
      next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_GAP;

  /* These describe the padding characteristics when in burst state */

  /* use_rtt_estimate tries to estimate how long padding cells take to go from
     C->M, and uses that as the base of the histogram */
  circ_responder_machine->states[CIRCPAD_STATE_BURST].use_rtt_estimate = 1;
  /* The histogram is 2 bins: an empty one, and infinity */
  circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram_len = 2;
  circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram_edges[0] = 500;
  circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram_edges[1] =
      CIRCPAD_DELAY_INFINITE;
  /* During burst state we wait forever for padding to arrive.

     We are waiting for a padding cell from the client to come in, so that we
     respond, and we imitate what an extend looks like */
  circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram[0] = 0;
  // Only infinity bin:
  circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram[1] = 1;
  circ_responder_machine->states[CIRCPAD_STATE_BURST].
      histogram_total_tokens = 1;

  /* From the gap state, we _stay_ in the gap state, when we receive padding
   * or non-padding */
  circ_responder_machine->states[CIRCPAD_STATE_GAP].
      next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_GAP;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].
      next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_GAP;

  /* And from the gap state, we go to the end, when the bins are empty or a
   * non-padding cell is sent */
  circ_responder_machine->states[CIRCPAD_STATE_GAP].
      next_state[CIRCPAD_EVENT_BINS_EMPTY] = CIRCPAD_STATE_END;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].
      next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_END;

  // FIXME: Tune this histogram

  /* The gap state is the delay you wait after you receive a padding cell
     before you send a padding response */
  circ_responder_machine->states[CIRCPAD_STATE_GAP].use_rtt_estimate = 1;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_len = 6;
  /* Specify histogram bins */
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[0] = 500;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[1] = 1000;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[2] = 2000;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[3] = 4000;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[4] = 8000;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[5] = 16000;
  /* Specify histogram tokens */
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[0] = 0;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[1] = 1;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[2] = 2;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[3] = 2;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[4] = 1;
  /* Total number of tokens */
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_total_tokens = 6;

  circ_responder_machine->states[CIRCPAD_STATE_GAP].token_removal =
      CIRCPAD_TOKEN_REMOVAL_CLOSEST_USEC;

  circ_responder_machine->machine_num = smartlist_len(relay_padding_machines);
  circpad_register_padding_machine(circ_responder_machine,
                                   relay_padding_machines);
}
#endif /* defined(TOR_UNIT_TESTS) */

/**
 * Initialize all of our padding machines.
 *
 * This is called at startup. It sets up some global machines, and then
 * loads some from torrc, and from the tor consensus.
 */
void
circpad_machines_init(void)
{
  tor_assert_nonfatal(origin_padding_machines == NULL);
  tor_assert_nonfatal(relay_padding_machines == NULL);

  origin_padding_machines = smartlist_new();
  relay_padding_machines = smartlist_new();

  /* Register machines for hiding client-side intro circuits */
  circpad_machine_client_hide_intro_circuits(origin_padding_machines);
  circpad_machine_relay_hide_intro_circuits(relay_padding_machines);

  /* Register machines for hiding client-side rendezvous circuits */
  circpad_machine_client_hide_rend_circuits(origin_padding_machines);
  circpad_machine_relay_hide_rend_circuits(relay_padding_machines);

  // TODO: Parse machines from consensus and torrc
#ifdef TOR_UNIT_TESTS
  circpad_circ_client_machine_init();
  circpad_circ_responder_machine_init();
#endif
}

/**
 * Free our padding machines
 */
void
circpad_machines_free(void)
{
  if (origin_padding_machines) {
    SMARTLIST_FOREACH(origin_padding_machines,
                      circpad_machine_spec_t *,
                      m, tor_free(m->states); tor_free(m));
    smartlist_free(origin_padding_machines);
  }

  if (relay_padding_machines) {
    SMARTLIST_FOREACH(relay_padding_machines,
                      circpad_machine_spec_t *,
                      m, tor_free(m->states); tor_free(m));
    smartlist_free(relay_padding_machines);
  }
}

/**
 * Check the Protover info to see if a node supports padding.
 */
static bool
circpad_node_supports_padding(const node_t *node)
{
  if (node->rs) {
    log_fn(LOG_INFO, LD_CIRC, "Checking padding: %s",
           node->rs->pv.supports_hs_setup_padding ?
           "supported" : "unsupported");
    return node->rs->pv.supports_hs_setup_padding;
  }

  log_fn(LOG_INFO, LD_CIRC, "Empty routerstatus in padding check");
  return 0;
}

/**
 * Get a node_t for the nth hop in our circuit, starting from 1.
 *
 * Returns node_t from the consensus for that hop, if it is opened.
 * Otherwise returns NULL.
 */
MOCK_IMPL(STATIC const node_t *,
circuit_get_nth_node,(origin_circuit_t *circ, int hop))
{
  crypt_path_t *iter = circuit_get_cpath_hop(circ, hop);

  if (!iter || iter->state != CPATH_STATE_OPEN)
    return NULL;

  return node_get_by_id(iter->extend_info->identity_digest);
}

/**
 * Return true if a particular circuit supports padding
 * at the desired hop.
 */
static bool
circpad_circuit_supports_padding(origin_circuit_t *circ,
                                 int target_hopnum)
{
  const node_t *hop;

  if (!(hop = circuit_get_nth_node(circ, target_hopnum))) {
    return 0;
  }

  return circpad_node_supports_padding(hop);
}

/**
 * Try to negotiate padding.
 *
 * Returns -1 on error, 0 on success.
 */
signed_error_t
circpad_negotiate_padding(origin_circuit_t *circ,
                          circpad_machine_num_t machine,
                          uint8_t target_hopnum,
                          uint8_t command,
                          uint32_t machine_ctr)
{
  circpad_negotiate_t type;
  cell_t cell;
  ssize_t len;

  /* Check that the target hop lists support for padding in
   * its ProtoVer fields */
  if (!circpad_circuit_supports_padding(circ, target_hopnum)) {
    return -1;
  }

  memset(&cell, 0, sizeof(cell_t));
  memset(&type, 0, sizeof(circpad_negotiate_t));
  // This gets reset to RELAY_EARLY appropriately by
  // relay_send_command_from_edge_. At least, it looks that way.
  // QQQ-MP-AP: Verify that.
  cell.command = CELL_RELAY;

  circpad_negotiate_set_command(&type, command);
  circpad_negotiate_set_version(&type, 0);
  circpad_negotiate_set_machine_type(&type, machine);
  circpad_negotiate_set_machine_ctr(&type, machine_ctr);

  if ((len = circpad_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
                                      &type)) < 0)
    return -1;

  log_fn(LOG_INFO, LD_CIRC,
         "Negotiating padding on circuit %u (%d), command %d, for ctr %u",
         circ->global_identifier, TO_CIRCUIT(circ)->purpose, command,
         machine_ctr);

  return circpad_send_command_to_hop(circ, target_hopnum,
                                     RELAY_COMMAND_PADDING_NEGOTIATE,
                                     cell.payload, len);
}
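
/* Sketch of the round trip that the function above starts (this just
 * summarizes the handlers defined below; it is not additional protocol):
 * the client sends RELAY_COMMAND_PADDING_NEGOTIATE carrying
 * CIRCPAD_COMMAND_START (or STOP), a machine number, and its machine
 * counter to the target hop; circpad_handle_padding_negotiate() at the
 * relay installs or frees the matching machine and answers with
 * RELAY_COMMAND_PADDING_NEGOTIATED carrying CIRCPAD_RESPONSE_OK or
 * CIRCPAD_RESPONSE_ERR; circpad_handle_padding_negotiated() at the client
 * then keeps or tears down the client-side machine accordingly. */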

/**
 * Send a PADDING_NEGOTIATED response for a padding negotiation request.
 *
 * Returns 1 if successful (or already set up), 0 otherwise.
 */
static uint8_t
circpad_padding_negotiated(circuit_t *circ,
                           circpad_machine_num_t machine,
                           uint8_t command,
                           uint8_t response,
                           uint32_t machine_ctr)
{
  circpad_negotiated_t type;
  cell_t cell;
  ssize_t len;

  memset(&cell, 0, sizeof(cell_t));
  memset(&type, 0, sizeof(circpad_negotiated_t));
  // This gets reset to RELAY_EARLY appropriately by
  // relay_send_command_from_edge_. At least, it looks that way.
  // QQQ-MP-AP: Verify that.
  cell.command = CELL_RELAY;

  circpad_negotiated_set_command(&type, command);
  circpad_negotiated_set_response(&type, response);
  circpad_negotiated_set_version(&type, 0);
  circpad_negotiated_set_machine_type(&type, machine);
  circpad_negotiated_set_machine_ctr(&type, machine_ctr);

  if ((len = circpad_negotiated_encode(cell.payload, CELL_PAYLOAD_SIZE,
                                       &type)) < 0)
    return 0;

  /* Use relay_send because we're from the middle to the origin. We don't
   * need to specify a target hop or layer_hint. */
  return relay_send_command_from_edge(0, circ,
                                      RELAY_COMMAND_PADDING_NEGOTIATED,
                                      (void*)cell.payload,
                                      (size_t)len, NULL) == 0;
}

/**
 * Parse and react to a padding_negotiate cell.
 *
 * This is called at the middle node upon receipt of the client's choice of
 * state machine, so that it can use the requested state machine index, if
 * it is available.
 *
 * Returns -1 on error, 0 on success.
 */
signed_error_t
circpad_handle_padding_negotiate(circuit_t *circ, cell_t *cell)
{
  int retval = 0;
  /* Should we send back a STOP cell? */
  bool respond_with_stop = true;
  circpad_negotiate_t *negotiate;

  if (CIRCUIT_IS_ORIGIN(circ)) {
    log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
           "Padding negotiate cell unsupported at origin (circuit %u)",
           TO_ORIGIN_CIRCUIT(circ)->global_identifier);
    return -1;
  }

  if (circpad_negotiate_parse(&negotiate, cell->payload+RELAY_HEADER_SIZE,
                              CELL_PAYLOAD_SIZE-RELAY_HEADER_SIZE) < 0) {
    log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
           "Received malformed PADDING_NEGOTIATE cell; dropping.");
    return -1;
  }

  if (negotiate->command == CIRCPAD_COMMAND_STOP) {
    /* Free the machine corresponding to this machine type */
    if (free_circ_machineinfos_with_machine_num(circ,
                                                negotiate->machine_type,
                                                negotiate->machine_ctr)) {
      log_info(LD_CIRC, "Received STOP command for machine %u, ctr %u",
               negotiate->machine_type, negotiate->machine_ctr);
      goto done;
    }

    /* If we reached this point we received a STOP command from an old or
       unknown machine. Don't reply with our own STOP since there is no one to
       handle it on the other end */
    respond_with_stop = false;

    if (negotiate->machine_ctr <= circ->padding_machine_ctr) {
      log_info(LD_CIRC, "Received STOP command for old machine %u, ctr %u",
               negotiate->machine_type, negotiate->machine_ctr);
      goto done;
    } else {
      log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
             "Received circuit padding stop command for unknown machine.");
      goto err;
    }
  } else if (negotiate->command == CIRCPAD_COMMAND_START) {
    SMARTLIST_FOREACH_BEGIN(relay_padding_machines,
                            const circpad_machine_spec_t *, m) {
      if (m->machine_num == negotiate->machine_type) {
        circpad_setup_machine_on_circ(circ, m);
        if (negotiate->machine_ctr &&
            circ->padding_machine_ctr != negotiate->machine_ctr) {
          log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
                 "Client and relay have different counts for padding machines: "
                 "%u vs %u", circ->padding_machine_ctr,
                 negotiate->machine_ctr);
        }
        circpad_cell_event_nonpadding_received(circ);
        goto done;
      }
    } SMARTLIST_FOREACH_END(m);
  }

 err:
  retval = -1;

 done:
  if (respond_with_stop) {
    circpad_padding_negotiated(circ, negotiate->machine_type,
                               negotiate->command,
                               (retval == 0) ? CIRCPAD_RESPONSE_OK
                                             : CIRCPAD_RESPONSE_ERR,
                               negotiate->machine_ctr);
  }

  circpad_negotiate_free(negotiate);
  return retval;
}

/**
 * Parse and react to a padding_negotiated cell.
 *
 * This is called at the origin upon receipt of the middle's response
 * to our choice of state machine.
 *
 * Returns -1 on error, 0 on success.
 */
signed_error_t
circpad_handle_padding_negotiated(circuit_t *circ, cell_t *cell,
                                  crypt_path_t *layer_hint)
{
  circpad_negotiated_t *negotiated;

  if (!CIRCUIT_IS_ORIGIN(circ)) {
    log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
           "Padding negotiated cell unsupported at non-origin.");
    return -1;
  }

  /* Verify this came from the expected hop */
  if (!circpad_padding_is_from_expected_hop(circ, layer_hint)) {
    log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
           "Padding negotiated cell from wrong hop on circuit %u",
           TO_ORIGIN_CIRCUIT(circ)->global_identifier);
    return -1;
  }

  if (circpad_negotiated_parse(&negotiated, cell->payload+RELAY_HEADER_SIZE,
                               CELL_PAYLOAD_SIZE-RELAY_HEADER_SIZE) < 0) {
    log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
           "Received malformed PADDING_NEGOTIATED cell on circuit %u; "
           "dropping.", TO_ORIGIN_CIRCUIT(circ)->global_identifier);
    return -1;
  }

  if (negotiated->command == CIRCPAD_COMMAND_STOP) {
    log_info(LD_CIRC,
             "Received STOP command on PADDING_NEGOTIATED for circuit %u",
             TO_ORIGIN_CIRCUIT(circ)->global_identifier);
    /* There may not be a padding_info here if we shut down the
     * machine in circpad_shutdown_old_machines(). Or, if
     * circpad_add_matching_machines() added a new machine,
     * there may be a padding_machine for a different machine num
     * than this response. */
    free_circ_machineinfos_with_machine_num(circ, negotiated->machine_type,
                                            negotiated->machine_ctr);
  } else if (negotiated->command == CIRCPAD_COMMAND_START &&
             negotiated->response == CIRCPAD_RESPONSE_ERR) {
    // This can still happen due to consensus drift.. free the machines
    if (free_circ_machineinfos_with_machine_num(circ, negotiated->machine_type,
                                                negotiated->machine_ctr)) {
      // Only fail if a machine was there and matched the error cell
      TO_ORIGIN_CIRCUIT(circ)->padding_negotiation_failed = 1;
      log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
             "Middle node did not accept our padding request on circuit "
             "%u (%d)",
             TO_ORIGIN_CIRCUIT(circ)->global_identifier,
             TO_CIRCUIT(circ)->purpose);
    }
  }

  circpad_negotiated_free(negotiated);
  return 0;
}

/** Free memory allocated by this machine spec. */
static void
machine_spec_free_(circpad_machine_spec_t *m)
{
  if (!m)
    return;

  tor_free(m->states);
  tor_free(m);
}
#define machine_spec_free(m) \
  FREE_AND_NULL(circpad_machine_spec_t, machine_spec_free_, (m))

/** Free all memory allocated by the circuitpadding subsystem. */
void
circpad_free_all(void)
{
  if (origin_padding_machines) {
    SMARTLIST_FOREACH_BEGIN(origin_padding_machines,
                            circpad_machine_spec_t *, m) {
      machine_spec_free(m);
    } SMARTLIST_FOREACH_END(m);
    smartlist_free(origin_padding_machines);
  }
  if (relay_padding_machines) {
    SMARTLIST_FOREACH_BEGIN(relay_padding_machines,
                            circpad_machine_spec_t *, m) {
      machine_spec_free(m);
    } SMARTLIST_FOREACH_END(m);
    smartlist_free(relay_padding_machines);
  }
}

/* Serialization */
// TODO: Should we use keyword=value here? Are there helpers for that?

#if 0
static void
circpad_state_serialize(const circpad_state_t *state,
                        smartlist_t *chunks)
{
  smartlist_add_asprintf(chunks, " %u", state->histogram[0]);
  for (int i = 1; i < state->histogram_len; i++) {
    smartlist_add_asprintf(chunks, ",%u",
                           state->histogram[i]);
  }

  smartlist_add_asprintf(chunks, " 0x%x",
                         state->transition_cancel_events);

  for (int i = 0; i < CIRCPAD_NUM_STATES; i++) {
    smartlist_add_asprintf(chunks, ",0x%x",
                           state->transition_events[i]);
  }

  smartlist_add_asprintf(chunks, " %u %u",
                         state->use_rtt_estimate,
                         state->token_removal);
}

char *
circpad_machine_spec_to_string(const circpad_machine_spec_t *machine)
{
  smartlist_t *chunks = smartlist_new();
  char *out;

  circpad_state_serialize(&machine->start, chunks);
  circpad_state_serialize(&machine->gap, chunks);
  circpad_state_serialize(&machine->burst, chunks);

  out = smartlist_join_strings(chunks, "", 0, NULL);

  SMARTLIST_FOREACH(chunks, char *, cp, tor_free(cp));
  smartlist_free(chunks);
  return out;
}

const circpad_machine_spec_t *
circpad_string_to_machine(const char *str)