/* Copyright (c) 2017-2021, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file test_hs_common.c
 * \brief Test hidden service common functionalities.
 */

#define CONNECTION_EDGE_PRIVATE
#define HS_COMMON_PRIVATE
#define HS_CLIENT_PRIVATE
#define HS_SERVICE_PRIVATE
#define NODELIST_PRIVATE

#include "test/test.h"
#include "test/test_helpers.h"
#include "test/log_test_helpers.h"
#include "test/hs_test_helpers.h"

#include "core/or/connection_edge.h"
#include "lib/crypt_ops/crypto_format.h"
#include "lib/crypt_ops/crypto_rand.h"
#include "feature/hs/hs_common.h"
#include "feature/hs/hs_client.h"
#include "feature/hs/hs_service.h"
#include "app/config/config.h"
#include "feature/nodelist/networkstatus.h"
#include "feature/dirclient/dirclient.h"
#include "feature/dirauth/dirvote.h"
#include "feature/nodelist/nodelist.h"
#include "feature/nodelist/routerlist.h"
#include "app/config/statefile.h"
#include "core/or/circuitlist.h"
#include "feature/dirauth/shared_random.h"
#include "feature/dirauth/voting_schedule.h"

#include "feature/nodelist/microdesc_st.h"
#include "feature/nodelist/networkstatus_st.h"
#include "feature/nodelist/node_st.h"
#include "app/config/or_state_st.h"
#include "feature/nodelist/routerinfo_st.h"
#include "feature/nodelist/routerstatus_st.h"
/** Test the validation of HS v3 addresses */
static void
test_validate_address(void *arg)
{
  int ret;

  (void) arg;

  /* Address too short and too long. */
  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid("blah");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("Invalid length");
  teardown_capture_of_logs();

  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnadb");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("Invalid length");
  teardown_capture_of_logs();

  /* Invalid checksum (taken from prop224) */
  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "l5satjgud6gucryazcyvyvhuxhr74u6ygigiuyixe3a6ysis67ororad");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("invalid checksum");
  teardown_capture_of_logs();

  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "btojiu7nu5y5iwut64eufevogqdw4wmqzugnoluw232r4t3ecsfv37ad");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("invalid checksum");
  teardown_capture_of_logs();

  /* Non base32 decodable string. */
  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "????????????????????????????????????????????????????????");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("Unable to base32 decode");
  teardown_capture_of_logs();

  /* Valid address. */
  ret = hs_address_is_valid(
           "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");
  tt_int_op(ret, OP_EQ, 1);

 done:
  ;
}
static int
mock_write_str_to_file(const char *path, const char *str, int bin)
{
  (void) bin;

  tt_str_op(path, OP_EQ, "/double/five"PATH_SEPARATOR"squared");
  tt_str_op(str, OP_EQ,
           "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion\n");

 done:
  return 0;
}
/** Test building HS v3 onion addresses. Uses test vectors from the
 * ./hs_build_address.py script. */
static void
test_build_address(void *arg)
{
  int ret;
  char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  ed25519_public_key_t pubkey;
  /* hex-encoded ed25519 pubkey used in hs_build_address.py */
  const char *pubkey_hex =
    "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
  hs_service_t *service = NULL;

  (void) arg;

  MOCK(write_str_to_file, mock_write_str_to_file);

  /* The following has been created with the hs_build_address.py script,
   * which follows the proposal 224 specification to build an onion
   * address. */
  static const char *test_addr =
    "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid";

  /* Let's try to build the same onion address as the script */
  base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
                pubkey_hex, strlen(pubkey_hex));
  hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  tt_str_op(test_addr, OP_EQ, onion_addr);
  /* Validate that address. */
  ret = hs_address_is_valid(onion_addr);
  tt_int_op(ret, OP_EQ, 1);

  service = tor_malloc_zero(sizeof(hs_service_t));
  memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  tor_asprintf(&service->config.directory_path, "/double/five");
  ret = write_address_to_file(service, "squared");
  tt_int_op(ret, OP_EQ, 0);

 done:
  hs_service_free(service);
}
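
/* For context, rend-spec-v3 defines the address built by hs_build_address()
 * as
 *   onion_address = base32(PUBKEY | CHECKSUM | VERSION)
 *   CHECKSUM = H(".onion checksum" | PUBKEY | VERSION)[:2]
 * where H is SHA3-256, PUBKEY is the 32-byte ed25519 identity key and
 * VERSION is 0x03. The helper below is an illustrative sketch of the
 * checksum step only; it was added for this rewrite, is not part of the
 * original test suite, and assumes crypto_digest256() (already used further
 * down in this file) is visible through the included headers. */
static inline void
sketch_onion_checksum(const ed25519_public_key_t *pk, uint8_t version,
                      uint8_t checksum_out[2])
{
  /* ".onion checksum" is 15 bytes, concatenated without a NUL terminator. */
  char data[15 + ED25519_PUBKEY_LEN + 1];
  char digest[DIGEST256_LEN];
  memcpy(data, ".onion checksum", 15);
  memcpy(data + 15, pk->pubkey, ED25519_PUBKEY_LEN);
  data[15 + ED25519_PUBKEY_LEN] = (char)version;
  crypto_digest256(digest, data, sizeof(data), DIGEST_SHA3_256);
  /* Only the first two bytes of the digest go into the address. */
  memcpy(checksum_out, digest, 2);
}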
/** Test that our HS time period calculation functions work properly */
static void
test_time_period(void *arg)
{
  uint64_t tn;
  int retval;
  time_t fake_time, correct_time, start_time;

  (void) arg;

  /* Let's do the example in prop224 section [TIME-PERIODS] */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);

  /* Check that the time period number is right */
  tn = hs_get_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16903);
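
  /* Worked example (note added for clarity): with the default 1440-minute
   * period and the 12:00 UTC rotation offset, the [TIME-PERIODS] formula
   * reduces to
   *   tn = (unix_time - 12*3600) / 86400
   * "Wed, 13 Apr 2016 11:00:00 UTC" is 1460545200, and
   * (1460545200 - 43200) / 86400 == 16903, matching the assertion above. */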
  /* Increase current time to 11:59:59 UTC and check that the time period
   * number is still the same */
  fake_time += 3599;
  tn = hs_get_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16903);

  { /* Check start time of next time period */
    retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
                                &correct_time);
    tt_int_op(retval, OP_EQ, 0);

    start_time = hs_get_start_time_of_next_time_period(fake_time);
    tt_int_op(start_time, OP_EQ, correct_time);
  }

  /* Now take time to 12:00:00 UTC and check that the time period rotated */
  fake_time += 1;
  tn = hs_get_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16904);

  /* Now also check our hs_get_next_time_period_num() function */
  tn = hs_get_next_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16905);

  { /* Check start time of next time period again */
    retval = parse_rfc1123_time("Wed, 14 Apr 2016 12:00:00 UTC",
                                &correct_time);
    tt_int_op(retval, OP_EQ, 0);

    start_time = hs_get_start_time_of_next_time_period(fake_time);
    tt_int_op(start_time, OP_EQ, correct_time);
  }

  /* Now do another sanity check: The time period number at the start of the
   * next time period must be the same time period number as the one returned
   * from hs_get_next_time_period_num() */
  {
    time_t next_tp_start = hs_get_start_time_of_next_time_period(fake_time);
    tt_u64_op(hs_get_time_period_num(next_tp_start), OP_EQ,
              hs_get_next_time_period_num(fake_time));
  }

 done:
  ;
}
/** Test that we can correctly find the start time of the next time period */
static void
test_start_time_of_next_time_period(void *arg)
{
  int retval;
  time_t fake_time;
  char tbuf[ISO_TIME_LEN + 1];
  time_t next_tp_start_time;

  (void) arg;

  /* Do some basic tests */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);
  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  /* Compare it with the correct result */
  format_iso_time(tbuf, next_tp_start_time);
  tt_str_op("2016-04-13 12:00:00", OP_EQ, tbuf);

  /* Another test with an edge-case time (start of TP) */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);
  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  format_iso_time(tbuf, next_tp_start_time);
  tt_str_op("2016-04-14 12:00:00", OP_EQ, tbuf);

  /* Now pretend we are on a testing network and alter the voting schedule to
   * be every 10 seconds. This means that a time period has length 10*24
   * seconds (4 minutes). It also means that we apply a rotational offset of
   * 120 seconds to the time period, so that it starts at 00:02:00 instead of
   * 00:00:00. */
  or_options_t *options = get_options_mutable();
  options->TestingTorNetwork = 1;
  options->V3AuthVotingInterval = 10;
  options->TestingV3AuthInitialVotingInterval = 10;
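
  /* Worked example (note added for clarity): with a 10-second voting
   * interval, the time period length becomes 10*24 = 240 seconds and the
   * rotation offset 12*10 = 120 seconds, so period boundaries fall at
   * 00:02:00, 00:06:00, 00:10:00, ... which is exactly what the two
   * assertions below check. */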
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);
  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  /* Compare it with the correct result */
  format_iso_time(tbuf, next_tp_start_time);
  tt_str_op("2016-04-13 00:02:00", OP_EQ, tbuf);

  retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:02:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);
  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  /* Compare it with the correct result */
  format_iso_time(tbuf, next_tp_start_time);
  tt_str_op("2016-04-13 00:06:00", OP_EQ, tbuf);

 done:
  ;
}
/* Cleanup the global nodelist. It also frees the "md" in the node_t because
 * we allocate the memory in helper_add_hsdir_to_networkstatus(). */
static void
cleanup_nodelist(void)
{
  const smartlist_t *nodelist = nodelist_get_list();
  SMARTLIST_FOREACH_BEGIN(nodelist, node_t *, node) {
    tor_free(node->md);
    node->md = NULL;
  } SMARTLIST_FOREACH_END(node);
  nodelist_free_all();
}
static void
helper_add_hsdir_to_networkstatus(networkstatus_t *ns,
                                  int identity_idx,
                                  const char *nickname,
                                  int is_hsdir)
{
  routerstatus_t *rs = tor_malloc_zero(sizeof(routerstatus_t));
  routerinfo_t *ri = tor_malloc_zero(sizeof(routerinfo_t));
  uint8_t identity[DIGEST_LEN];
  node_t *node = NULL;

  memset(identity, identity_idx, sizeof(identity));

  memcpy(rs->identity_digest, identity, DIGEST_LEN);
  rs->is_hs_dir = is_hsdir;
  rs->pv.supports_v3_hsdir = 1;
  strlcpy(rs->nickname, nickname, sizeof(rs->nickname));
  tor_addr_parse(&ri->ipv4_addr, "1.2.3.4");
  tor_addr_parse(&rs->ipv4_addr, "1.2.3.4");
  ri->nickname = tor_strdup(nickname);
  ri->protocol_list = tor_strdup("HSDir=1-2 LinkAuth=3");
  memcpy(ri->cache_info.identity_digest, identity, DIGEST_LEN);
  ri->cache_info.signing_key_cert = tor_malloc_zero(sizeof(tor_cert_t));
  /* Needed for the HSDir index computation. */
  memset(&ri->cache_info.signing_key_cert->signing_key,
         identity_idx, ED25519_PUBKEY_LEN);
  tt_assert(nodelist_set_routerinfo(ri, NULL));

  node = node_get_mutable_by_id(ri->cache_info.identity_digest);
  tt_assert(node);
  node->rs = rs;
  /* We need this to exist for node_has_preferred_descriptor() to return
   * true. */
  node->md = tor_malloc_zero(sizeof(microdesc_t));
  /* Do this now; the nodelist_set_routerinfo() function needs a "rs" to set
   * the indexes, which it doesn't have when it is called. */
  node_set_hsdir_index(node, ns);

  smartlist_add(ns->routerstatus_list, rs);

  return;
 done:
  routerstatus_free(rs);
}
static networkstatus_t *mock_ns = NULL;

static networkstatus_t *
mock_networkstatus_get_latest_consensus(void)
{
  time_t now = approx_time();

  /* If initialized, return it */
  if (mock_ns) {
    return mock_ns;
  }

  /* Initialize fake consensus */
  mock_ns = tor_malloc_zero(sizeof(networkstatus_t));

  /* This consensus is live */
  mock_ns->valid_after = now-1;
  mock_ns->fresh_until = now+1;
  mock_ns->valid_until = now+2;
  /* Create routerstatus list */
  mock_ns->routerstatus_list = smartlist_new();
  mock_ns->type = NS_TYPE_CONSENSUS;

  return mock_ns;
}

static networkstatus_t *
mock_networkstatus_get_reasonably_live_consensus(time_t now, int flavor)
{
  (void) now;
  (void) flavor;

  return mock_networkstatus_get_latest_consensus();
}
/** Test the responsible HSDirs calculation function */
static void
test_responsible_hsdirs(void *arg)
{
  smartlist_t *responsible_dirs = smartlist_new();
  networkstatus_t *ns = NULL;

  (void) arg;

  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(networkstatus_get_reasonably_live_consensus,
       mock_networkstatus_get_reasonably_live_consensus);

  ns = networkstatus_get_latest_consensus();

  { /* First router: HSdir */
    helper_add_hsdir_to_networkstatus(ns, 1, "igor", 1);
  }

  { /* Second HSDir */
    helper_add_hsdir_to_networkstatus(ns, 2, "victor", 1);
  }

  { /* Third relay but not HSDir */
    helper_add_hsdir_to_networkstatus(ns, 3, "spyro", 0);
  }

  /* Use a fixed time period and pub key so we always take the same path */
  ed25519_public_key_t pubkey;
  uint64_t time_period_num = 17653; // 2 May, 2018, 14:00.
  memset(&pubkey, 42, sizeof(pubkey));

  hs_get_responsible_hsdirs(&pubkey, time_period_num,
                            0, 0, responsible_dirs);

  /* Make sure that we only found 2 responsible HSDirs.
   * The third relay was not an hsdir! */
  tt_int_op(smartlist_len(responsible_dirs), OP_EQ, 2);

  /** TODO: Build a bigger network and do more tests here */

 done:
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_free(responsible_dirs);
  smartlist_clear(ns->routerstatus_list);
  networkstatus_vote_free(mock_ns);
  cleanup_nodelist();

  UNMOCK(networkstatus_get_latest_consensus);
  UNMOCK(networkstatus_get_reasonably_live_consensus);
}
static void
mock_directory_initiate_request(directory_request_t *req)
{
  (void) req;
}

static int
mock_hs_desc_encode_descriptor(const hs_descriptor_t *desc,
                               const ed25519_keypair_t *signing_kp,
                               const uint8_t *descriptor_cookie,
                               char **encoded_out)
{
  (void) desc;
  (void) signing_kp;
  (void) descriptor_cookie;

  tor_asprintf(encoded_out, "lulu");
  return 0;
}

static or_state_t dummy_state;

/* Mock function to get fake or state (used for rev counters) */
static or_state_t *
get_or_state_replacement(void)
{
  return &dummy_state;
}

static int
mock_router_have_minimum_dir_info(void)
{
  return 1;
}
/** Test that we correctly detect when the HSDir hash ring changes so that we
 * reupload our descriptor. */
static void
test_desc_reupload_logic(void *arg)
{
  networkstatus_t *ns = NULL;

  (void) arg;

  hs_init();

  MOCK(networkstatus_get_reasonably_live_consensus,
       mock_networkstatus_get_reasonably_live_consensus);
  MOCK(router_have_minimum_dir_info,
       mock_router_have_minimum_dir_info);
  MOCK(get_or_state,
       get_or_state_replacement);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(directory_initiate_request,
       mock_directory_initiate_request);
  MOCK(hs_desc_encode_descriptor,
       mock_hs_desc_encode_descriptor);

  ns = networkstatus_get_latest_consensus();

  /* Test logic:
   *  1) Upload descriptor to HSDirs
   *     CHECK that previous_hsdirs list was populated.
   *  2) Then call router_dir_info_changed() without an HSDir set change.
   *     CHECK that no reupload occurs.
   *  3) Now change the HSDir set, and call dir_info_changed() again.
   *     CHECK that reupload occurs.
   *  4) Finally call service_desc_schedule_upload().
   *     CHECK that previous_hsdirs list was cleared.
   */

  /* Let's start by building our descriptor and service */
  hs_service_descriptor_t *desc = service_descriptor_new();
  hs_service_t *service = NULL;
  /* hex-encoded ed25519 pubkey used in hs_build_address.py */
  const char *pubkey_hex =
    "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
  char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  ed25519_public_key_t pubkey;
  base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
                pubkey_hex, strlen(pubkey_hex));
  hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  service = tor_malloc_zero(sizeof(hs_service_t));
  memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  ed25519_public_key_generate(&service->keys.identity_pk,
                              &service->keys.identity_sk);
  service->desc_current = desc;
  /* Also add service to service map */
  hs_service_ht *service_map = get_hs_service_map();
  tt_assert(service_map);
  tt_int_op(hs_service_get_num_services(), OP_EQ, 0);
  register_service(service_map, service);
  tt_int_op(hs_service_get_num_services(), OP_EQ, 1);
  /* Now let's create our hash ring: */
  {
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  }

  /* Now let's upload our desc to all hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Poison next upload time so that we can see if it was changed by
   * router_dir_info_changed(). No changes in hash ring so far, so the upload
   * time should stay as is. */
  desc->next_upload_time = 42;
  router_dir_info_changed();
  tt_int_op(desc->next_upload_time, OP_EQ, 42);

  /* Now change the HSDir hash ring by swapping nora for aaron.
   * Start by clearing the hash ring */
  {
    SMARTLIST_FOREACH(ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(ns->routerstatus_list);
    cleanup_nodelist();
    routerlist_free_all();
  }

  { /* Now add back all the nodes */
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
    helper_add_hsdir_to_networkstatus(ns, 7, "nora", 1);
  }

  /* Now call service_desc_hsdirs_changed() and see that it detected the hash
   * ring change. */
  time_t now = approx_time();
  tt_assert(now);
  tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);
  /* Now order another upload and see that we keep having 6 prev hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Now restore the HSDir hash ring to its original state by swapping back
   * aaron for nora. */
  /* First clear up the hash ring */
  {
    SMARTLIST_FOREACH(ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(ns->routerstatus_list);
    cleanup_nodelist();
    routerlist_free_all();
  }

  { /* Now populate the hash ring again */
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  }

  /* Check that our algorithm catches this change of hsdirs */
  tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);

  /* Now pretend that the descriptor changed, and order a reupload to all
   * HSDirs. Make sure that the set of previous HSDirs was cleared. */
  service_desc_schedule_upload(desc, now, 1);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 0);

  /* Now reupload again: see that the prev hsdir set got populated again. */
  upload_descriptor_to_all(service, desc);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

 done:
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
  cleanup_nodelist();
  remove_service(get_hs_service_map(), service);
  hs_service_free(service);

  networkstatus_vote_free(ns);
  hs_free_all();
}
/** Test disaster SRV computation and caching */
static void
test_disaster_srv(void *arg)
{
  uint8_t *cached_disaster_srv_one = NULL;
  uint8_t *cached_disaster_srv_two = NULL;
  uint8_t srv_one[DIGEST256_LEN] = {0};
  uint8_t srv_two[DIGEST256_LEN] = {0};
  uint8_t srv_three[DIGEST256_LEN] = {0};
  uint8_t srv_four[DIGEST256_LEN] = {0};
  uint8_t srv_five[DIGEST256_LEN] = {0};

  (void) arg;

  /* Get the cached SRVs: we're going to use them later for verification */
  cached_disaster_srv_one = get_first_cached_disaster_srv();
  cached_disaster_srv_two = get_second_cached_disaster_srv();

  /* Compute some srvs */
  get_disaster_srv(1, srv_one);
  get_disaster_srv(2, srv_two);

  /* Check that the cached ones were updated */
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* For at least one SRV, check that its result was as expected. */
  {
    uint8_t srv1_expected[32];
    crypto_digest256(
      (char*)srv1_expected,
      "shared-random-disaster\0\0\0\0\0\0\x05\xA0\0\0\0\0\0\0\0\1",
      strlen("shared-random-disaster")+16,
      DIGEST_SHA3_256);
    tt_mem_op(srv_one, OP_EQ, srv1_expected, DIGEST256_LEN);
    tt_str_op(hex_str((char*)srv_one, DIGEST256_LEN), OP_EQ,
       "F8A4948707653837FA44ABB5BBC75A12F6F101E7F8FAF699B9715F4965D3507D");
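
    /* (Note added for clarity.) The input above follows the rend-spec-v3
     * disaster formula
     *   disaster_srv(N) = H("shared-random-disaster" |
     *                       INT_8(period_length) | INT_8(N))
     * where H is SHA3-256 and INT_8() is an 8-byte big-endian integer:
     * \x00...\x05\xA0 encodes the period length 1440 and \x00...\x01 the
     * period number 1. */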
  }

  /* Ask for an SRV that has already been computed */
  get_disaster_srv(2, srv_two);
  /* and check that the cache entries have not changed */
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for a new SRV */
  get_disaster_srv(3, srv_three);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for another SRV: none of the original SRVs should now be cached */
  get_disaster_srv(4, srv_four);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);

  /* Ask for yet another SRV */
  get_disaster_srv(5, srv_five);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_five, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);

 done:
  ;
}
/** Test our HS descriptor request tracker by making various requests and
 * checking whether they get tracked properly. */
static void
test_hid_serv_request_tracker(void *arg)
{
  (void) arg;
  time_t retval;
  routerstatus_t *hsdir = NULL, *hsdir2 = NULL, *hsdir3 = NULL;
  time_t now = approx_time();

  const char *req_key_str_first =
 "vd4zb6zesaubtrjvdqcr2w7x7lhw2up4Xnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_second =
 "g53o7iavcd62oihswhr24u6czmqws5kpXnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_small = "ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ";

  /*************************** basic test *******************************/

  /* Get request tracker and make sure it's empty */
  strmap_t *request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker),OP_EQ, 0);
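
  /* (Note added for clarity; a hedged summary of the hs_common.c behavior
   * under test.) The tracker is a strmap: each key is the base32-encoded
   * HSDir identity digest concatenated with the request key string, and
   * each value is the time at which that HSDir was last queried. */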
  /* Let's register a hid serv request */
  hsdir = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir->identity_digest, 'Z', DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);

  /* Let's lookup a non-existent hidserv request */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_second,
                                           now, 0);
  tt_int_op(retval, OP_EQ, 0);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);

  /* Let's lookup a real hidserv request */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now, 0);
  tt_int_op(retval, OP_EQ, now); /* we got it */
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);

  /**********************************************************************/

  /* Let's add another request for the same HS but on a different HSDir. */
  hsdir2 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir2->identity_digest, 2, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir2, req_key_str_first,
                                           now+3, 1);
  tt_int_op(retval, OP_EQ, now+3);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);

  /* Check that we can clean the first request based on time */
  hs_clean_last_hid_serv_requests(now+3+REND_HID_SERV_DIR_REQUERY_PERIOD);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
  /* Check that it doesn't exist anymore */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now, 0);
  tt_int_op(retval, OP_EQ, 0);

  /* Now let's add a smaller req key str */
  hsdir3 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir3->identity_digest, 3, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir3, req_key_str_small,
                                           now+4, 1);
  tt_int_op(retval, OP_EQ, now+4);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);

  /*************************** deleting entries **************************/

  /* Add another request with very short key */
  retval = hs_lookup_last_hid_serv_request(hsdir, "l", now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);

  /* Try deleting entries with a dummy key. Check that our previous requests
   * are still there */
  tor_capture_bugs_(1);
  hs_purge_hid_serv_from_last_hid_serv_requests("a");
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  tor_end_capture_bugs_();

  /* Try another dummy key. Check that requests are still there */
  {
    char dummy[2000];
    memset(dummy, 'Z', 2000);
    dummy[1999] = '\x00';
    hs_purge_hid_serv_from_last_hid_serv_requests(dummy);
    tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  }

  /* Another dummy key! */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_second);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);

  /* Now actually delete a request! */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_first);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);

  /* Now purge everything and check that the tracker is empty */
  hs_purge_last_hid_serv_requests();
  request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker),OP_EQ, 0);

 done:
  tor_free(hsdir);
  tor_free(hsdir2);
  tor_free(hsdir3);
}
static void
test_parse_extended_hostname(void *arg)
{
  (void) arg;
  hostname_type_t type;

  char address1[] = "fooaddress.onion";
  char address3[] = "fooaddress.exit";
  char address4[] = "www.torproject.org";
  char address5[] = "foo.abcdefghijklmnop.onion";
  char address6[] = "foo.bar.abcdefghijklmnop.onion";
  char address7[] = ".abcdefghijklmnop.onion";
  char address8[] =
    "www.25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion";
  char address9[] =
    "www.15njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion";
  char address10[] =
    "15njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid7jdl.onion";

  tt_assert(!parse_extended_hostname(address1, &type));
  tt_int_op(type, OP_EQ, BAD_HOSTNAME);

  tt_assert(parse_extended_hostname(address3, &type));
  tt_int_op(type, OP_EQ, EXIT_HOSTNAME);

  tt_assert(parse_extended_hostname(address4, &type));
  tt_int_op(type, OP_EQ, NORMAL_HOSTNAME);

  tt_assert(!parse_extended_hostname(address5, &type));
  tt_int_op(type, OP_EQ, BAD_HOSTNAME);

  tt_assert(!parse_extended_hostname(address6, &type));
  tt_int_op(type, OP_EQ, BAD_HOSTNAME);

  tt_assert(!parse_extended_hostname(address7, &type));
  tt_int_op(type, OP_EQ, BAD_HOSTNAME);

  tt_assert(parse_extended_hostname(address8, &type));
  tt_int_op(type, OP_EQ, ONION_V3_HOSTNAME);
  tt_str_op(address8, OP_EQ,
            "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");

  /* Invalid v3 address. */
  tt_assert(!parse_extended_hostname(address9, &type));
  tt_int_op(type, OP_EQ, BAD_HOSTNAME);

  /* Invalid v3 address: too long */
  tt_assert(!parse_extended_hostname(address10, &type));
  tt_int_op(type, OP_EQ, BAD_HOSTNAME);

 done:
  ;
}
static void
test_time_between_tp_and_srv(void *arg)
{
  int ret;
  networkstatus_t ns;

  (void) arg;

  /* This function should be returning true where "^" are:
   *
   *    +------------------------------------------------------------------+
   *    |                                                                   |
   *    | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *    | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *    |                                                                   |
   *    |  $==========|-----------$===========|-----------$===========|    |
   *    |              ^^^^^^^^^^^^            ^^^^^^^^^^^^                 |
   *    |                                                                   |
   *    +------------------------------------------------------------------+
   */

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 00:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 01:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 0);

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 11:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 0);

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 13:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 1);

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 23:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 1);

  ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 27 Oct 1985 01:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 0);

 done:
  ;
}
/************ Reachability Test (it is huge) ****************/

/* Simulate different consensuses for client and service. Used by the
 * reachability test. The SRVs and responsible HSDir lists are used by all
 * reachability tests, so make them common to simplify setup and teardown. */
static networkstatus_t *mock_service_ns = NULL;
static networkstatus_t *mock_client_ns = NULL;
static sr_srv_t current_srv, previous_srv;
static smartlist_t *service_responsible_hsdirs = NULL;
static smartlist_t *client_responsible_hsdirs = NULL;

static networkstatus_t *
mock_networkstatus_get_reasonably_live_consensus_service(time_t now,
                                                         int flavor)
{
  (void) now;
  (void) flavor;

  if (mock_service_ns) {
    return mock_service_ns;
  }

  mock_service_ns = tor_malloc_zero(sizeof(networkstatus_t));
  mock_service_ns->routerstatus_list = smartlist_new();
  mock_service_ns->type = NS_TYPE_CONSENSUS;

  return mock_service_ns;
}

static networkstatus_t *
mock_networkstatus_get_latest_consensus_service(void)
{
  return mock_networkstatus_get_reasonably_live_consensus_service(0, 0);
}

static networkstatus_t *
mock_networkstatus_get_reasonably_live_consensus_client(time_t now, int flavor)
{
  (void) now;
  (void) flavor;

  if (mock_client_ns) {
    return mock_client_ns;
  }

  mock_client_ns = tor_malloc_zero(sizeof(networkstatus_t));
  mock_client_ns->routerstatus_list = smartlist_new();
  mock_client_ns->type = NS_TYPE_CONSENSUS;

  return mock_client_ns;
}

static networkstatus_t *
mock_networkstatus_get_latest_consensus_client(void)
{
  return mock_networkstatus_get_reasonably_live_consensus_client(0, 0);
}

/* Mock function because we are not trying to test circuit closing, which
 * does an awful lot of checks on the circuit object. */
static void
mock_circuit_mark_for_close(circuit_t *circ, int reason, int line,
                            const char *file)
{
  (void) circ;
  (void) reason;
  (void) line;
  (void) file;
  return;
}
/* Initialize a big HSDir V3 hash ring. */
static void
helper_initialize_big_hash_ring(networkstatus_t *ns)
{
  int ret;

  /* Generate 250 hsdirs! :) */
  for (int counter = 1 ; counter < 251 ; counter++) {
    /* Let's generate a random nickname for each hsdir... */
    char nickname_binary[8];
    char nickname_str[13] = {0};
    crypto_rand(nickname_binary, sizeof(nickname_binary));
    ret = base64_encode(nickname_str, sizeof(nickname_str),
                        nickname_binary, sizeof(nickname_binary), 0);
    tt_int_op(ret, OP_EQ, 12);
    helper_add_hsdir_to_networkstatus(ns, counter, nickname_str, 1);
  }

  /* Make sure we have 250 hsdirs in our list */
  tt_int_op(smartlist_len(ns->routerstatus_list), OP_EQ, 250);

 done:
  ;
}
/** Initialize service and publish its descriptor as needed. Return the newly
 * allocated service object to the caller. */
static hs_service_t *
helper_init_service(time_t now)
{
  int retval;
  hs_service_t *service = hs_service_new(get_options());
  tt_assert(service);
  service->config.version = HS_VERSION_THREE;
  ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  ed25519_public_key_generate(&service->keys.identity_pk,
                              &service->keys.identity_sk);
  /* Register service to global map. */
  retval = register_service(get_hs_service_map(), service);
  tt_int_op(retval, OP_EQ, 0);

  /* Initialize service descriptor */
  build_all_descriptors(now);
  tt_assert(service->desc_current);
  tt_assert(service->desc_next);

 done:
  return service;
}
/* Helper function to set the RFC 1123 time string into t. */
static void
set_consensus_times(const char *timestr, time_t *t)
{
  tt_assert(timestr);
  tt_assert(t);

  int ret = parse_rfc1123_time(timestr, t);
  tt_int_op(ret, OP_EQ, 0);

 done:
  return;
}
/* Helper function to cleanup the mock consensus (client and service) */
static void
cleanup_mock_ns(void)
{
  if (mock_service_ns) {
    SMARTLIST_FOREACH(mock_service_ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(mock_service_ns->routerstatus_list);
    mock_service_ns->sr_info.current_srv = NULL;
    mock_service_ns->sr_info.previous_srv = NULL;
    networkstatus_vote_free(mock_service_ns);
    mock_service_ns = NULL;
  }

  if (mock_client_ns) {
    SMARTLIST_FOREACH(mock_client_ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(mock_client_ns->routerstatus_list);
    mock_client_ns->sr_info.current_srv = NULL;
    mock_client_ns->sr_info.previous_srv = NULL;
    networkstatus_vote_free(mock_client_ns);
    mock_client_ns = NULL;
  }
}
/* Helper function to setup a reachability test. Once called,
 * cleanup_reachability_test() MUST be called at the end. */
static void
setup_reachability_test(void)
{
  MOCK(circuit_mark_for_close_, mock_circuit_mark_for_close);
  MOCK(get_or_state, get_or_state_replacement);

  hs_init();

  /* Baseline to start with. */
  memset(&current_srv, 0, sizeof(current_srv));
  memset(&previous_srv, 1, sizeof(previous_srv));

  /* Initialize the consensuses. */
  mock_networkstatus_get_latest_consensus_service();
  mock_networkstatus_get_latest_consensus_client();

  service_responsible_hsdirs = smartlist_new();
  client_responsible_hsdirs = smartlist_new();
}
/* Helper function to cleanup a reachability test initial setup. */
static void
cleanup_reachability_test(void)
{
  smartlist_free(service_responsible_hsdirs);
  service_responsible_hsdirs = NULL;
  smartlist_free(client_responsible_hsdirs);
  client_responsible_hsdirs = NULL;

  hs_free_all();

  UNMOCK(get_or_state);
  UNMOCK(circuit_mark_for_close_);
}
/* A reachability test always checks whether the resulting service and client
 * responsible HSDirs for the given parameters are equal.
 *
 * Return true iff exactly the same nodes are in both lists. */
static int
are_responsible_hsdirs_equal(void)
{
  int count = 0;

  tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);
  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);
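
  /* (Note added for clarity.) The 6 and 8 above come from the default
   * consensus parameters: hsdir_n_replicas=2 with hsdir_spread_fetch=3
   * gives a client 2*3 = 6 responsible HSDirs, while hsdir_spread_store=4
   * gives the service 2*4 = 8. */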
  SMARTLIST_FOREACH_BEGIN(client_responsible_hsdirs,
                          const routerstatus_t *, c_rs) {
    SMARTLIST_FOREACH_BEGIN(service_responsible_hsdirs,
                            const routerstatus_t *, s_rs) {
      if (tor_memeq(c_rs->identity_digest, s_rs->identity_digest,
                    DIGEST_LEN)) {
        count++;
        break;
      }
    } SMARTLIST_FOREACH_END(s_rs);
  } SMARTLIST_FOREACH_END(c_rs);

 done:
  return (count == 6);
}
/* Tor doesn't use such a function to get the previous HSDir index; it is
 * only used in node_set_hsdir_index(). We need it here so we can test
 * reachability scenario 6, which requires the previous time period to
 * compute the list of responsible HSDirs because of the client state
 * timing. */
static uint64_t
get_previous_time_period(time_t now)
{
  return hs_get_time_period_num(now) - 1;
}
/* Configuration of a reachability test scenario. */
typedef struct reachability_cfg_t {
  /* Consensus timings to be set. They have to be compliant with
   * RFC 1123 time format. */
  const char *service_valid_after;
  const char *service_valid_until;
  const char *client_valid_after;
  const char *client_valid_until;

  /* SRVs that the service and client should use. */
  sr_srv_t *service_current_srv;
  sr_srv_t *service_previous_srv;
  sr_srv_t *client_current_srv;
  sr_srv_t *client_previous_srv;

  /* A time period function for the service to use for this scenario. For a
   * successful reachability test, the client always uses the current time
   * period, hence no client function. */
  uint64_t (*service_time_period_fn)(time_t);

  /* Are the client and service expected to be in a new time period? After
   * setting the consensus time, the reachability test checks
   * hs_in_period_between_tp_and_srv() and tests the returned value against
   * this. */
  unsigned int service_in_new_tp;
  unsigned int client_in_new_tp;

  /* Some scenarios require a hint that the client, because of its consensus
   * time, will request the "next" service descriptor, so this indicates if
   * it is the case or not. */
  unsigned int client_fetch_next_desc;
} reachability_cfg_t;

/* Some defines to help with semantics while reading a configuration below. */
#define NOT_IN_NEW_TP 0
#define IN_NEW_TP 1
#define DONT_NEED_NEXT_DESC 0
#define NEED_NEXT_DESC 1
static reachability_cfg_t reachability_scenarios[] = {
  /* Scenario 1
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |              ^ ^                                                  |
   *  |              S C                                                  |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 13:00 and client to 15:00,
   *  both are after TP#1 thus have access to SRV#1. Service and client
   *  should be using TP#1.
   */

  { "Sat, 26 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 26 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 15:00:00 UTC", /* Client valid_after */
    "Sat, 26 Oct 1985 16:00:00 UTC", /* Client valid_until. */
    &current_srv, NULL, /* Service current and previous SRV */
    &current_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },
  /* Scenario 2
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                        ^ ^                                        |
   *  |                        S C                                        |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 23:00 and client to 01:00,
   *  which puts the client just after SRV#2 and the service just before it.
   *  The service should only be using TP#1. The client should also be using
   *  TP#1.
   */

  { "Sat, 26 Oct 1985 23:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 01:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Client valid_until. */
    &previous_srv, NULL, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },
  /* Scenario 3
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                            ^ ^                                    |
   *  |                            S C                                    |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 03:00 and client to 05:00,
   *  which makes both after SRV#2. The service should be using TP#1 as its
   *  current time period. The client should be using TP#1.
   */

  { "Sat, 27 Oct 1985 03:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 04:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 05:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 06:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },
  /* Scenario 4
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                    ^ ^                            |
   *  |                                    S C                            |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 11:00 and client to 13:00,
   *  which makes the service before TP#2 and the client just after. The
   *  service should be using TP#1 as its current time period and TP#2 as
   *  the next. The client should be using TP#2 time period.
   */

  { "Sat, 27 Oct 1985 11:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 13:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_next_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },
  /* Scenario 5
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                        ^ ^                                        |
   *  |                        C S                                        |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 01:00 and client to 23:00,
   *  which makes the service after SRV#2 and the client just before. The
   *  service should be using TP#1 as its current time period and TP#2 as
   *  the next. The client should be using TP#1 time period.
   */

  { "Sat, 27 Oct 1985 01:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 23:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &previous_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },
  /* Scenario 6
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                    ^ ^                            |
   *  |                                    C S                            |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 13:00 and client to 11:00,
   *  which makes the service just after TP#2 and the client just before it.
   *  The service should be using TP#1 as its current time period and TP#2
   *  as its next. The client should be using TP#1 time period.
   */

  { "Sat, 27 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 11:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    get_previous_time_period, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },
  /* End marker. */
  { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0}
};
/* Run a single reachability scenario. num_scenario is the corresponding
 * scenario number from the documentation. It is used to log it in case of
 * failure so we know which scenario fails. */
static int
run_reachability_scenario(const reachability_cfg_t *cfg, int num_scenario)
{
  int ret = -1;
  hs_service_t *service;
  uint64_t service_tp, client_tp;
  ed25519_public_key_t service_blinded_pk, client_blinded_pk;

  setup_reachability_test();

  tt_assert(cfg);

  /* Set service consensus time. */
  set_consensus_times(cfg->service_valid_after,
                      &mock_service_ns->valid_after);
  set_consensus_times(cfg->service_valid_until,
                      &mock_service_ns->valid_until);
  set_consensus_times(cfg->service_valid_until,
                      &mock_service_ns->fresh_until);
  dirauth_sched_recalculate_timing(get_options(),
                                   mock_service_ns->valid_after);
  /* Check that service is in the right time period point */
  tt_int_op(hs_in_period_between_tp_and_srv(mock_service_ns, 0), OP_EQ,
            cfg->service_in_new_tp);

  /* Set client consensus time. */
  set_consensus_times(cfg->client_valid_after,
                      &mock_client_ns->valid_after);
  set_consensus_times(cfg->client_valid_until,
                      &mock_client_ns->valid_until);
  set_consensus_times(cfg->client_valid_until,
                      &mock_client_ns->fresh_until);
  dirauth_sched_recalculate_timing(get_options(),
                                   mock_client_ns->valid_after);
  /* Check that client is in the right time period point */
  tt_int_op(hs_in_period_between_tp_and_srv(mock_client_ns, 0), OP_EQ,
            cfg->client_in_new_tp);

  /* Set the SRVs for this scenario. */
  mock_client_ns->sr_info.current_srv = cfg->client_current_srv;
  mock_client_ns->sr_info.previous_srv = cfg->client_previous_srv;
  mock_service_ns->sr_info.current_srv = cfg->service_current_srv;
  mock_service_ns->sr_info.previous_srv = cfg->service_previous_srv;

  /* Initialize a service to get keys. */
  update_approx_time(mock_service_ns->valid_after);
  service = helper_init_service(mock_service_ns->valid_after+1);
  /*
   * === Client setup ===
   */

  MOCK(networkstatus_get_reasonably_live_consensus,
       mock_networkstatus_get_reasonably_live_consensus_client);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus_client);

  /* Make networkstatus_is_live() happy. */
  update_approx_time(mock_client_ns->valid_after);
  /* Initialize a big hashring for this consensus with the hsdir index set. */
  helper_initialize_big_hash_ring(mock_client_ns);

  /* The client ONLY uses the current time period. That is the whole point of
   * these reachability tests: to make sure the client can always reach the
   * service using only its current time period. */
  client_tp = hs_get_time_period_num(0);

  hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
                          client_tp, &client_blinded_pk);
  hs_get_responsible_hsdirs(&client_blinded_pk, client_tp, 0, 1,
                            client_responsible_hsdirs);
  /* Cleanup the nodelist so we can let the service compute its own set of
   * nodes with its own hashring. */
  cleanup_nodelist();
  tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);

  UNMOCK(networkstatus_get_latest_consensus);
  UNMOCK(networkstatus_get_reasonably_live_consensus);
  /*
   * === Service setup ===
   */

  MOCK(networkstatus_get_reasonably_live_consensus,
       mock_networkstatus_get_reasonably_live_consensus_service);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus_service);

  /* Make networkstatus_is_live() happy. */
  update_approx_time(mock_service_ns->valid_after);
  /* Initialize a big hashring for this consensus with the hsdir index set. */
  helper_initialize_big_hash_ring(mock_service_ns);

  service_tp = cfg->service_time_period_fn(0);

  hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
                          service_tp, &service_blinded_pk);

  /* A service builds two lists of responsible HSDirs: one for the current
   * and one for the next descriptor. Depending on the scenario, the client
   * timing indicates whether it is fetching the current or the next
   * descriptor, so we use "client_fetch_next_desc" to know which one the
   * client is trying to get, and confirm that the service computes the same
   * hashring for the same blinded key and service time period function. */
  hs_get_responsible_hsdirs(&service_blinded_pk, service_tp,
                            cfg->client_fetch_next_desc, 0,
                            service_responsible_hsdirs);
  cleanup_nodelist();
  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);

  UNMOCK(networkstatus_get_latest_consensus);
  UNMOCK(networkstatus_get_reasonably_live_consensus);

  /* Some testing of the values we just got from the client and service. */
  tt_mem_op(&client_blinded_pk, OP_EQ, &service_blinded_pk,
            ED25519_PUBKEY_LEN);
  tt_int_op(are_responsible_hsdirs_equal(), OP_EQ, 1);

  /* Everything went well. */
  ret = 0;

 done:
  cleanup_reachability_test();
  if (ret == -1) {
    /* Do this so we can know which scenario failed. */
    char msg[32];
    tor_snprintf(msg, sizeof(msg), "Scenario %d failed", num_scenario);
    tt_fail_msg(msg);
  }
  return ret;
}
static void
test_reachability(void *arg)
{
  (void) arg;

  /* NOTE: An important axiom to understand here is that SRV#N must only be
   * used with the TP#N value. For example, SRV#2 with TP#1 should NEVER be
   * used together. The HSDir index computation is based on this axiom. */

  for (int i = 0; reachability_scenarios[i].service_valid_after; ++i) {
    int ret = run_reachability_scenario(&reachability_scenarios[i], i + 1);
    if (ret < 0) {
      return;
    }
  }
}
static void
test_blinding_basics(void *arg)
{
  (void) arg;
  char *mem_op_hex_tmp = NULL;
  const uint64_t time_period = 1234;
  ed25519_keypair_t keypair;
  time_t instant;

  tt_int_op(0, OP_EQ, parse_iso_time("1973-05-20 01:50:33", &instant));
  tt_int_op(1440, OP_EQ, get_time_period_length()); // in minutes, remember.
  tt_int_op(time_period, OP_EQ, hs_get_time_period_num(instant));

  const char pubkey_hex[] =
    "833990B085C1A688C1D4C8B1F6B56AFAF5A2ECA674449E1D704F83765CCB7BC6";
  const char seckey_hex[] =
    "D8C7FF0E31295B66540D789AF3E3DF992038A9592EEA01D8B7CBA06D6E66D159"
    "4D6167696320576F7264733A20737065697373636F62616C742062697669756D";
  base16_decode((char*)keypair.pubkey.pubkey, sizeof(keypair.pubkey.pubkey),
                pubkey_hex, strlen(pubkey_hex));
  base16_decode((char*)keypair.seckey.seckey, sizeof(keypair.seckey.seckey),
                seckey_hex, strlen(seckey_hex));

  uint64_t period_len = get_time_period_length();
  tt_u64_op(period_len, OP_EQ, 1440);
  uint8_t params[32];
  build_blinded_key_param(&keypair.pubkey, NULL, 0,
                          time_period, period_len, params);
  test_memeq_hex(params,
                 "379E50DB31FEE6775ABD0AF6FB7C371E"
                 "060308F4F847DB09FE4CFE13AF602287");

  ed25519_public_key_t blinded_public;
  hs_build_blinded_pubkey(&keypair.pubkey, NULL, 0, time_period,
                          &blinded_public);
  hs_subcredential_t subcred;
  hs_get_subcredential(&keypair.pubkey, &blinded_public, &subcred);
  test_memeq_hex(blinded_public.pubkey,
                 "3A50BF210E8F9EE955AE0014F7A6917F"
                 "B65EBF098A86305ABB508D1A7291B6D5");
  test_memeq_hex(subcred.subcred,
                 "635D55907816E8D76398A675A50B1C2F"
                 "3E36B42A5CA77BA3A0441285161AE07D");

  ed25519_keypair_t blinded_keypair;
  hs_build_blinded_keypair(&keypair, NULL, 0, time_period,
                           &blinded_keypair);
  tt_mem_op(blinded_public.pubkey, OP_EQ, blinded_keypair.pubkey.pubkey,
            ED25519_PUBKEY_LEN);
  test_memeq_hex(blinded_keypair.seckey.seckey,
                 "A958DC83AC885F6814C67035DE817A2C"
                 "604D5D2F715282079448F789B656350B"
                 "4540FE1F80AA3F7E91306B7BF7A8E367"
                 "293352B14A29FDCC8C19F3558075524B");
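
  /* (Note added for clarity.) The vectors above follow the key blinding
   * scheme of rend-spec-v3 appendix [KEYBLIND]: the blinded public key is
   * A' = h*A, where the parameter checked above is
   *   h = H(BLIND_STRING | A | s | B | N)
   *   BLIND_STRING = "Derive temporary signing key" | INT_1(0)
   *   N = "key-blind" | INT_8(period-number) | INT_8(period_length)
   * with B the ed25519 base point and s an optional secret (NULL here). */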
 done:
  tor_free(mem_op_hex_tmp);
}
/** Pick an HSDir for service with <b>onion_identity_pk</b> as a client. Put
 * its identity digest in <b>hsdir_digest_out</b>. */
static void
helper_client_pick_hsdir(const ed25519_public_key_t *onion_identity_pk,
                         char *hsdir_digest_out)
{
  tt_assert(onion_identity_pk);

  routerstatus_t *client_hsdir = pick_hsdir_v3(onion_identity_pk);
  tt_assert(client_hsdir);
  digest_to_base64(hsdir_digest_out, client_hsdir->identity_digest);

 done:
  ;
}
static void
test_hs_indexes(void *arg)
{
  int ret;
  uint64_t period_num = 42;
  ed25519_public_key_t pubkey;

  (void) arg;

  /* Build the hs_index */
  {
    uint8_t hs_index[DIGEST256_LEN];
    const char *b32_test_vector =
      "37e5cbbd56a22823714f18f1623ece5983a0d64c78495a8cfab854245e5f9a8a";
    char test_vector[DIGEST256_LEN];
    ret = base16_decode(test_vector, sizeof(test_vector), b32_test_vector,
                        strlen(b32_test_vector));
    tt_int_op(ret, OP_EQ, sizeof(test_vector));
    /* Our test vector uses a public key set to 32 bytes of \x42. */
    memset(&pubkey, '\x42', sizeof(pubkey));
    hs_build_hs_index(1, &pubkey, period_num, hs_index);
    tt_mem_op(hs_index, OP_EQ, test_vector, sizeof(hs_index));
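
    /* (Note added for clarity.) The expected value follows the rend-spec-v3
     * [HASHRING] formula, with H being SHA3-256 and INT_8() an 8-byte
     * big-endian integer:
     *   hs_index(replica) = H("store-at-idx" | blinded_public_key |
     *                         INT_8(replica) | INT_8(period_length) |
     *                         INT_8(period_num)) */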
  }

  /* Build the hsdir_index */
  {
    uint8_t srv[DIGEST256_LEN];
    uint8_t hsdir_index[DIGEST256_LEN];
    const char *b32_test_vector =
      "db475361014a09965e7e5e4d4a25b8f8d4b8f16cb1d8a7e95eed50249cc1a2d5";
    char test_vector[DIGEST256_LEN];
    ret = base16_decode(test_vector, sizeof(test_vector), b32_test_vector,
                        strlen(b32_test_vector));
    tt_int_op(ret, OP_EQ, sizeof(test_vector));
    /* Our test vector uses a public key set to 32 bytes of \x42. */
    memset(&pubkey, '\x42', sizeof(pubkey));
    memset(srv, '\x43', sizeof(srv));
    hs_build_hsdir_index(&pubkey, srv, period_num, hsdir_index);
    tt_mem_op(hsdir_index, OP_EQ, test_vector, sizeof(hsdir_index));
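
    /* (Note added for clarity.) Likewise, the expected value follows the
     * rend-spec-v3 [HASHRING] formula:
     *   hsdir_index(node) = H("node-idx" | node_identity |
     *                         shared_random_value | INT_8(period_num) |
     *                         INT_8(period_length)) */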
  }

 done:
  ;
}

#define EARLY_IN_SRV_TO_TP 0
#define LATE_IN_SRV_TO_TP 1
#define EARLY_IN_TP_TO_SRV 2
#define LATE_IN_TP_TO_SRV 3

/** Set the consensus and system time based on <b>position</b>. See the
 *  following diagram for details:
 *
 *  +------------------------------------------------------------------+
 *  |                                                                   |
 *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
 *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
 *  |                                                                   |
 *  |  $==========|-----------$===========|----------$===========|     |
 *  |                                                                   |
 *  +------------------------------------------------------------------+
 */
static time_t
helper_set_consensus_and_system_time(networkstatus_t *ns, int position)
{
  time_t real_time = 0;

  /* The period between SRV#N and TP#N is from 00:00 to 12:00 UTC. Consensus
   * valid_after is what matters here; the rest is just to specify the voting
   * period correctly. */
  if (position == LATE_IN_SRV_TO_TP) {
    parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->valid_until);
  } else if (position == EARLY_IN_TP_TO_SRV) {
    parse_rfc1123_time("Wed, 13 Apr 2016 13:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 13 Apr 2016 16:00:00 UTC", &ns->valid_until);
  } else if (position == LATE_IN_TP_TO_SRV) {
    parse_rfc1123_time("Wed, 13 Apr 2016 23:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 14 Apr 2016 00:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->valid_until);
  } else if (position == EARLY_IN_SRV_TO_TP) {
    parse_rfc1123_time("Wed, 14 Apr 2016 01:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 14 Apr 2016 04:00:00 UTC", &ns->valid_until);
  } else {
    tt_assert(0);
  }
  dirauth_sched_recalculate_timing(get_options(), ns->valid_after);

  /* Set system time: pretend to be just 2 minutes before consensus expiry */
  real_time = ns->valid_until - 120;
  update_approx_time(real_time);

  return real_time;
 done:
  return 0;
}
1702 /** Helper function that carries out the actual test for
1703 * test_client_service_sync() */
1705 helper_test_hsdir_sync(networkstatus_t
*ns
,
1706 int service_position
, int client_position
,
1707 int client_fetches_next_desc
)
1709 hs_service_descriptor_t
*desc
;
  /** Test logic:
   *  1) Initialize service time: consensus and system time.
   *  1.1) Initialize service hash ring
   *  2) Initialize service and publish descriptors.
   *  3) Initialize client time: consensus and system time.
   *  3.1) Initialize client hash ring
   *  4) Try to fetch descriptor as client, and CHECK that the HSDir picked by
   *     the client was also picked by service.
   */
  /* 1) Initialize service time: consensus and real time */
  time_t now = helper_set_consensus_and_system_time(ns, service_position);
  helper_initialize_big_hash_ring(ns);

  /* 2) Initialize service */
  hs_service_t *service = helper_init_service(now);
  desc = client_fetches_next_desc ? service->desc_next : service->desc_current;

  /* Now let's upload our desc to all hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Cleanup right now so we don't memleak on error. */
  cleanup_nodelist();
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 8);

  /* 3) Initialize client time */
  helper_set_consensus_and_system_time(ns, client_position);

  cleanup_nodelist();
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
  helper_initialize_big_hash_ring(ns);
  /* 4) Pick 6 HSDirs as a client and check that they were also chosen by the
   *    service. */
  for (int y = 0 ; y < 6 ; y++) {
    char client_hsdir_b64_digest[BASE64_DIGEST_LEN+1] = {0};
    helper_client_pick_hsdir(&service->keys.identity_pk,
                             client_hsdir_b64_digest);

    /* CHECK: Go through the hsdirs chosen by the service and make sure that
     * it contains the one picked by the client! */
    retval = smartlist_contains_string(desc->previous_hsdirs,
                                       client_hsdir_b64_digest);
    tt_int_op(retval, OP_EQ, 1);
  }
  /* Finally, try to pick a 7th hsdir and see that NULL is returned since we
   * exhausted all of them: */
  tt_assert(!pick_hsdir_v3(&service->keys.identity_pk));

 done:
  /* At the end: free all services and initialize the subsystem again, we will
   * need it for next scenario. */
  cleanup_nodelist();
  hs_service_free_all();
  hs_service_init();
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
}
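
/* Editor's sketch (kept out of the build): the exhaustion check above relies
 * on pick_hsdir_v3() recording each pick in the hidden-service request
 * tracker, so that subsequent calls exclude already-picked HSDirs and NULL
 * signals that the responsible set is used up. A minimal restatement of that
 * contract, with a hypothetical function name: */
#if 0
static void
sketch_pick_until_exhausted(const ed25519_public_key_t *identity_pk)
{
  int picked = 0;
  /* Each successful pick is remembered, so the next call excludes it. */
  while (pick_hsdir_v3(identity_pk) != NULL) {
    picked++;
  }
  /* With the client-side responsible set in this test, picked ends up at 6. */
  (void) picked;
}
#endif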
/** This test ensures that client and service will pick the same HSDirs, under
 *  various timing scenarios:
 *  a) Scenario where both client and service are in the time segment between
 *     SRV#N and TP#N.
 *  b) Scenario where both client and service are in the time segment between
 *     TP#N and SRV#N+1.
 *  c) Scenario where service is between SRV#N and TP#N, but client is between
 *     TP#N and SRV#N+1.
 *  d) Scenario where service is between TP#N and SRV#N+1, but client is
 *     between SRV#N and TP#N.
 *  e) Scenario where service is between SRV#N and TP#N, but client is between
 *     TP#N-1 and SRV#N.
 *  f) Scenario where service is between TP#N and SRV#N+1, but client is
 *     between SRV#N+1 and TP#N+1.
 *
 *  This test is important because it tests that upload_descriptor_to_all() is
 *  in synch with pick_hsdir_v3(). That's not the case for the
 *  test_reachability() test which only compares the responsible hsdir sets.
 */
static void
test_client_service_hsdir_set_sync(void *arg)
{
  networkstatus_t *ns = NULL;

  (void) arg;

  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(networkstatus_get_reasonably_live_consensus,
       mock_networkstatus_get_reasonably_live_consensus);
  MOCK(get_or_state,
       get_or_state_replacement);
  MOCK(hs_desc_encode_descriptor,
       mock_hs_desc_encode_descriptor);
  MOCK(directory_initiate_request,
       mock_directory_initiate_request);

  hs_init();
  /* Initialize a big hash ring: we want it to be big so that client and
   * service cannot accidentally select the same HSDirs */
  ns = networkstatus_get_latest_consensus();
  tt_assert(ns);

  /** Now test the various synch scenarios. See the helper function for more
   *  details: */

  /* a) Scenario where both client and service are in the time segment between
   *    SRV#N and TP#N. At this time the client fetches the first HS desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|----------$===========|     |
   *  |                                                                  |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, LATE_IN_SRV_TO_TP, 0);
  /* b) Scenario where both client and service are in the time segment between
   *    TP#N and SRV#N+1. At this time the client fetches the second HS
   *    desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                                                  |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, LATE_IN_TP_TO_SRV, 1);
  /* c) Scenario where service is between SRV#N and TP#N, but client is
   *    between TP#N and SRV#N+1. Client is forward in time so it fetches
   *    the second HS desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                                                  |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, EARLY_IN_TP_TO_SRV, 1);
  /* d) Scenario where service is between TP#N and SRV#N+1, but client is
   *    between SRV#N and TP#N. Client is backwards in time so it fetches
   *    the first HS desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                                                  |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, EARLY_IN_TP_TO_SRV, LATE_IN_SRV_TO_TP, 0);
  /* e) Scenario where service is between SRV#N and TP#N, but client is
   *    between TP#N-1 and SRV#N. Client is backwards in time so it fetches
   *    the first HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                                                  |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, EARLY_IN_SRV_TO_TP, LATE_IN_TP_TO_SRV, 0);
  /* f) Scenario where service is between TP#N and SRV#N+1, but client is
   *    between SRV#N+1 and TP#N+1. Client is forward in time so it fetches
   *    the second HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                                                  |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, EARLY_IN_SRV_TO_TP, 1);
 done:
  networkstatus_vote_free(ns);
  nodelist_free_all();
  hs_free_all();
}
struct testcase_t hs_common_tests[] = {
  { "blinding_basics", test_blinding_basics, TT_FORK, NULL, NULL },
  { "build_address", test_build_address, TT_FORK,
    NULL, NULL },
  { "validate_address", test_validate_address, TT_FORK,
    NULL, NULL },
  { "time_period", test_time_period, TT_FORK,
    NULL, NULL },
  { "start_time_of_next_time_period", test_start_time_of_next_time_period,
    TT_FORK, NULL, NULL },
  { "responsible_hsdirs", test_responsible_hsdirs, TT_FORK,
    NULL, NULL },
  { "desc_reupload_logic", test_desc_reupload_logic, TT_FORK,
    NULL, NULL },
  { "disaster_srv", test_disaster_srv, TT_FORK,
    NULL, NULL },
  { "hid_serv_request_tracker", test_hid_serv_request_tracker, TT_FORK,
    NULL, NULL },
  { "parse_extended_hostname", test_parse_extended_hostname, TT_FORK,
    NULL, NULL },
  { "time_between_tp_and_srv", test_time_between_tp_and_srv, TT_FORK,
    NULL, NULL },
  { "reachability", test_reachability, TT_FORK,
    NULL, NULL },
  { "client_service_hsdir_set_sync", test_client_service_hsdir_set_sync,
    TT_FORK, NULL, NULL },
  { "hs_indexes", test_hs_indexes, TT_FORK,
    NULL, NULL },

  END_OF_TESTCASES
};