/* Copyright (c) 2001-2004, Roger Dingledine.
 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2021, The Tor Project, Inc. */
/* See LICENSE for licensing information */

#include "core/or/or.h"

#include "app/config/config.h"
#include "core/mainloop/connection.h"
#include "feature/dircache/conscache.h"
#include "feature/dircache/consdiffmgr.h"
#include "feature/dircommon/directory.h"
#include "feature/dircache/dirserv.h"
#include "feature/nodelist/microdesc.h"
#include "feature/nodelist/routerlist.h"
#include "feature/relay/router.h"
#include "feature/relay/routermode.h"
#include "feature/stats/predict_ports.h"

#include "feature/dircache/cached_dir_st.h"
#include "feature/dircommon/dir_connection_st.h"
#include "feature/nodelist/extrainfo_st.h"
#include "feature/nodelist/microdesc_st.h"
#include "feature/nodelist/routerinfo_st.h"
#include "feature/nodelist/routerlist_st.h"

#include "lib/compress/compress.h"

/**
 * \file dirserv.c
 * \brief Directory server core implementation. Manages directory
 * contents and generates directory documents.
 *
 * This module implements most of the directory cache functionality, and some
 * of the directory authority functionality. The directory.c module delegates
 * here in order to handle incoming requests from clients, via
 * connection_dirserv_flushed_some() and its kin. In order to save RAM, this
 * module is responsible for spooling directory objects (in whole or in part)
 * onto buf_t instances, and then closing the dir_connection_t once the
 * objects are totally flushed.
 *
 * The directory.c module also delegates here for handling descriptor uploads
 * via dirserv_add_multiple_descriptors().
 *
 * Additionally, this module handles some aspects of voting, including:
 * deciding how to vote on individual flags (based on decisions reached in
 * rephist.c), formatting routerstatus lines, and deciding what relays to
 * include in an authority's vote. (TODO: Those functions could profitably be
 * split off. They only live in this file because historically they were
 * shared among the v1, v2, and v3 directory code.)
 **/

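/* A rough sketch of how a spooled reply is served (the request parsing and
 * dispatch live in directory.c; only the spooling side is implemented here):
 *
 *   1. A handler fills conn->spool with spooled_resource_t objects, e.g. via
 *      dirserv_get_routerdesc_spool() or spooled_resource_new().
 *   2. dirserv_spool_sort() orders the spool, and
 *      dirserv_spool_remove_missing_and_guess_size() drops absent or expired
 *      entries while estimating the reply size.
 *   3. As the outbuf drains, connection_dirserv_flushed_some() repeatedly
 *      moves more spooled data onto the connection until the spool is empty,
 *      then tears the spool down.
 */
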
static void clear_cached_dir(cached_dir_t *d);
static const signed_descriptor_t *get_signed_descriptor_by_fp(
                                                        const uint8_t *fp,
                                                        int extrainfo);

static int spooled_resource_lookup_body(const spooled_resource_t *spooled,
                                        int conn_is_encrypted,
                                        const uint8_t **body_out,
                                        size_t *size_out,
                                        time_t *published_out);
static cached_dir_t *spooled_resource_lookup_cached_dir(
                                             const spooled_resource_t *spooled,
                                             time_t *published_out);
static cached_dir_t *lookup_cached_dir_by_fp(const uint8_t *fp);

/********************************************************************/

/* A set of functions to answer questions about how we'd like to behave
 * as a directory mirror */

/** Return true iff we want to serve certificates for authorities
 * that we don't acknowledge as authorities ourself.
 * Use we_want_to_fetch_unknown_auth_certs to check if we want to fetch
 * and keep these certificates.
 */
int
directory_caches_unknown_auth_certs(const or_options_t *options)
{
  return dir_server_mode(options) || options->BridgeRelay;
}

/** Return 1 if we want to fetch and serve descriptors, networkstatuses, etc.
 * Else return 0.
 * Check options->DirPort_set and directory_permits_begindir_requests()
 * to see if we are willing to serve these directory documents to others via
 * the DirPort and begindir-over-ORPort, respectively.
 *
 * To check if we should fetch documents, use we_want_to_fetch_flavor and
 * we_want_to_fetch_unknown_auth_certs instead of this function.
 */
int
directory_caches_dir_info(const or_options_t *options)
{
  if (options->BridgeRelay || dir_server_mode(options))
    return 1;
  if (!server_mode(options) || !advertised_server_mode())
    return 0;
  /* We need an up-to-date view of network info if we're going to try to
   * block exit attempts from unknown relays. */
  return ! router_my_exit_policy_is_reject_star() &&
         should_refuse_unknown_exits(options);
}

/** Return 1 if we want to allow remote clients to ask us directory
 * requests via the "begin_dir" interface, which doesn't require
 * having any separate port open. */
int
directory_permits_begindir_requests(const or_options_t *options)
{
  return options->BridgeRelay != 0 || dir_server_mode(options);
}

/********************************************************************/

/** Map from flavor name to the cached_dir_t for the v3 consensuses that we're
 * currently serving. */
static strmap_t *cached_consensuses = NULL;

/** Decrement the reference count on <b>d</b>, and free it if it no longer
 * has any references. */
void
cached_dir_decref(cached_dir_t *d)
{
  if (!d || --d->refcnt > 0)
    return;
  clear_cached_dir(d);
  tor_free(d);
}

/** Allocate and return a new cached_dir_t containing the string <b>s</b>,
 * published at <b>published</b>. */
static cached_dir_t *
new_cached_dir(char *s, time_t published)
{
  cached_dir_t *d = tor_malloc_zero(sizeof(cached_dir_t));
  d->refcnt = 1;
  d->dir = s;
  d->dir_len = strlen(s);
  d->published = published;
  if (tor_compress(&(d->dir_compressed), &(d->dir_compressed_len),
                   d->dir, d->dir_len, ZLIB_METHOD)) {
    log_warn(LD_BUG, "Error compressing directory");
  }
  return d;
}

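/* Ownership note: new_cached_dir() keeps <b>s</b> as the cached_dir_t's
 * uncompressed body and releases it when the object is cleared, so callers
 * hand it a heap-allocated copy (as dirserv_set_cached_consensus_networkstatus()
 * does with tor_memdup_nulterm() below) rather than a buffer they still own. */
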
/** Remove all storage held in <b>d</b>, but do not free <b>d</b> itself. */
static void
clear_cached_dir(cached_dir_t *d)
{
  tor_free(d->dir);
  tor_free(d->dir_compressed);
  memset(d, 0, sizeof(cached_dir_t));
}

/** Free all storage held by the cached_dir_t in <b>d</b>. */
static void
free_cached_dir_(void *_d)
{
  cached_dir_t *d;
  if (!_d)
    return;

  d = (cached_dir_t *)_d;
  cached_dir_decref(d);
}

/** Replace the v3 consensus networkstatus of type <b>flavor_name</b> that
 * we're serving with <b>networkstatus</b>, published at <b>published</b>. No
 * validation is performed. */
void
dirserv_set_cached_consensus_networkstatus(const char *networkstatus,
                                           size_t networkstatus_len,
                                           const char *flavor_name,
                                           const common_digests_t *digests,
                                           const uint8_t *sha3_as_signed,
                                           time_t published)
{
  cached_dir_t *new_networkstatus;
  cached_dir_t *old_networkstatus;
  if (!cached_consensuses)
    cached_consensuses = strmap_new();

  new_networkstatus =
    new_cached_dir(tor_memdup_nulterm(networkstatus, networkstatus_len),
                   published);
  memcpy(&new_networkstatus->digests, digests, sizeof(common_digests_t));
  memcpy(&new_networkstatus->digest_sha3_as_signed, sha3_as_signed,
         DIGEST256_LEN);
  old_networkstatus = strmap_set(cached_consensuses, flavor_name,
                                 new_networkstatus);
  if (old_networkstatus)
    cached_dir_decref(old_networkstatus);
}

/** Return the latest downloaded consensus networkstatus in encoded, signed,
 * optionally compressed format, suitable for sending to clients. */
MOCK_IMPL(cached_dir_t *,
dirserv_get_consensus,(const char *flavor_name))
{
  if (!cached_consensuses)
    return NULL;
  return strmap_get(cached_consensuses, flavor_name);
}

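/* Illustrative flow: when a fresh consensus of some flavor is accepted, its
 * signed body is handed to dirserv_set_cached_consensus_networkstatus() under
 * that flavor name (e.g. "microdesc"); later client requests are then served
 * straight from dirserv_get_consensus("microdesc") without re-encoding. */
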
/** As dir_split_resource_into_fingerprints, but instead fills
 * <b>spool_out</b> with a list of spooled_resource_t for the resource
 * identified through <b>source</b>. */
int
dir_split_resource_into_spoolable(const char *resource,
                                  dir_spool_source_t source,
                                  smartlist_t *spool_out,
                                  int *compressed_out,
                                  int flags)
{
  smartlist_t *fingerprints = smartlist_new();

  tor_assert(flags & (DSR_HEX|DSR_BASE64));
  const size_t digest_len =
    (flags & DSR_DIGEST256) ? DIGEST256_LEN : DIGEST_LEN;

  int r = dir_split_resource_into_fingerprints(resource, fingerprints,
                                               compressed_out, flags);
  /* This is not a very efficient implementation XXXX */
  SMARTLIST_FOREACH_BEGIN(fingerprints, uint8_t *, digest) {
    spooled_resource_t *spooled =
      spooled_resource_new(source, digest, digest_len);
    if (spooled)
      smartlist_add(spool_out, spooled);
    tor_free(digest);
  } SMARTLIST_FOREACH_END(digest);

  smartlist_free(fingerprints);
  return r;
}

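/* Example (illustrative): a request for two descriptors by digest carries a
 * resource of the form "<hex1>+<hex2>". With DSR_HEX|DSR_SORT_UNIQ the
 * fingerprints are decoded, deduplicated, and turned into one
 * spooled_resource_t apiece, as dirserv_get_routerdesc_spool() does below for
 * "d/" and "fp/" requests. */
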
/** As dirserv_get_routerdescs(), but instead of getting signed_descriptor_t
 * pointers, adds copies of digests to <b>spool_out</b>, and doesn't use the
 * /tor/server/ prefix. For a /d/ request, adds descriptor digests; for other
 * requests, adds identity digests.
 */
int
dirserv_get_routerdesc_spool(smartlist_t *spool_out,
                             const char *key,
                             dir_spool_source_t source,
                             int conn_is_encrypted,
                             const char **msg_out)
{
  *msg_out = NULL;

  if (!strcmp(key, "all")) {
    const routerlist_t *rl = router_get_routerlist();
    SMARTLIST_FOREACH_BEGIN(rl->routers, const routerinfo_t *, r) {
      spooled_resource_t *spooled;
      spooled = spooled_resource_new(source,
                              (const uint8_t *)r->cache_info.identity_digest,
                              DIGEST_LEN);
      /* Treat "all" requests as if they were unencrypted */
      conn_is_encrypted = 0;
      smartlist_add(spool_out, spooled);
    } SMARTLIST_FOREACH_END(r);
  } else if (!strcmp(key, "authority")) {
    const routerinfo_t *ri = router_get_my_routerinfo();
    if (ri)
      smartlist_add(spool_out,
                    spooled_resource_new(source,
                             (const uint8_t *)ri->cache_info.identity_digest,
                             DIGEST_LEN));
  } else if (!strcmpstart(key, "d/")) {
    key += strlen("d/");
    dir_split_resource_into_spoolable(key, source, spool_out, NULL,
                                      DSR_HEX|DSR_SORT_UNIQ);
  } else if (!strcmpstart(key, "fp/")) {
    key += strlen("fp/");
    dir_split_resource_into_spoolable(key, source, spool_out, NULL,
                                      DSR_HEX|DSR_SORT_UNIQ);
  } else {
    *msg_out = "Not found";
    return -1;
  }

  if (! conn_is_encrypted) {
    /* Remove anything that insists it not be sent unencrypted. */
    SMARTLIST_FOREACH_BEGIN(spool_out, spooled_resource_t *, spooled) {
      const uint8_t *body = NULL;
      size_t bodylen = 0;
      int r = spooled_resource_lookup_body(spooled, conn_is_encrypted,
                                           &body, &bodylen, NULL);
      if (r < 0 || body == NULL || bodylen == 0) {
        SMARTLIST_DEL_CURRENT(spool_out, spooled);
        spooled_resource_free(spooled);
      }
    } SMARTLIST_FOREACH_END(spooled);
  }

  if (!smartlist_len(spool_out)) {
    *msg_out = "Servers unavailable";
    return -1;
  }
  return 0;
}

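/** Allocate and return a new spooled_resource_t for the object identified by
 * <b>digest</b> (of length <b>digestlen</b>) from <b>source</b>.
 * Descriptor-style sources are marked for eager (all-at-once) spooling;
 * networkstatus objects are spooled lazily from their cached_dir_t. */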
spooled_resource_t *
spooled_resource_new(dir_spool_source_t source,
                     const uint8_t *digest, size_t digestlen)
{
  spooled_resource_t *spooled = tor_malloc_zero(sizeof(spooled_resource_t));
  spooled->spool_source = source;
  switch (source) {
    case DIR_SPOOL_NETWORKSTATUS:
      spooled->spool_eagerly = 0;
      break;
    case DIR_SPOOL_SERVER_BY_DIGEST:
    case DIR_SPOOL_SERVER_BY_FP:
    case DIR_SPOOL_EXTRA_BY_DIGEST:
    case DIR_SPOOL_EXTRA_BY_FP:
    case DIR_SPOOL_MICRODESC:
    default:
      spooled->spool_eagerly = 1;
      break;
    case DIR_SPOOL_CONSENSUS_CACHE_ENTRY:
      tor_assert_unreached();
      break;
  }
  tor_assert(digestlen <= sizeof(spooled->digest));
  if (digest)
    memcpy(spooled->digest, digest, digestlen);
  return spooled;
}

/**
 * Create a new spooled_resource_t to spool the contents of <b>entry</b> to
 * the user. Return the spooled object on success, or NULL on failure (which
 * is probably caused by a failure to map the body of the item from disk).
 *
 * Adds a reference to entry's reference counter.
 */
spooled_resource_t *
spooled_resource_new_from_cache_entry(consensus_cache_entry_t *entry)
{
  spooled_resource_t *spooled = tor_malloc_zero(sizeof(spooled_resource_t));
  spooled->spool_source = DIR_SPOOL_CONSENSUS_CACHE_ENTRY;
  spooled->spool_eagerly = 0;
  consensus_cache_entry_incref(entry);
  spooled->consensus_cache_entry = entry;

  int r = consensus_cache_entry_get_body(entry,
                                         &spooled->cce_body,
                                         &spooled->cce_len);
  if (r == 0) {
    return spooled;
  } else {
    spooled_resource_free(spooled);
    return NULL;
  }
}

/** Release all storage held by <b>spooled</b>. */
void
spooled_resource_free_(spooled_resource_t *spooled)
{
  if (spooled == NULL)
    return;

  if (spooled->cached_dir_ref) {
    cached_dir_decref(spooled->cached_dir_ref);
  }

  if (spooled->consensus_cache_entry) {
    consensus_cache_entry_decref(spooled->consensus_cache_entry);
  }

  tor_free(spooled);
}

/** When spooling data from a cached_dir_t object, we always add
 * at least this much. */
#define DIRSERV_CACHED_DIR_CHUNK_SIZE 8192

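/* For example, a 20000-byte compressed consensus held in a cached_dir_t is
 * flushed by spooled_resource_flush_some() in chunks of 8192, 8192, and 3616
 * bytes across successive calls, resuming from cached_dir_offset each time. */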

/** Return a compression ratio for compressing objects from <b>source</b>.
 */
static double
estimate_compression_ratio(dir_spool_source_t source)
{
  /* We should put in better estimates here, depending on the number of
     objects and their type */
  (void) source;
  return 0.5;
}

/** Return an estimated number of bytes needed for transmitting the
 * resource in <b>spooled</b> on <b>conn</b>.
 *
 * As a convenient side-effect, set *<b>published_out</b> to the resource's
 * publication time.
 */
static size_t
spooled_resource_estimate_size(const spooled_resource_t *spooled,
                               dir_connection_t *conn,
                               int compressed,
                               time_t *published_out)
{
  if (spooled->spool_eagerly) {
    const uint8_t *body = NULL;
    size_t bodylen = 0;
    int r = spooled_resource_lookup_body(spooled,
                                         connection_dir_is_encrypted(conn),
                                         &body, &bodylen,
                                         published_out);
    if (r == -1 || body == NULL || bodylen == 0)
      return 0;

    double ratio = estimate_compression_ratio(spooled->spool_source);
    bodylen = (size_t)(bodylen * ratio);
    return bodylen;
  } else {
    cached_dir_t *cached;
    if (spooled->consensus_cache_entry) {
      if (published_out) {
        consensus_cache_entry_get_valid_after(
            spooled->consensus_cache_entry, published_out);
      }

      return spooled->cce_len;
    }
    if (spooled->cached_dir_ref) {
      cached = spooled->cached_dir_ref;
    } else {
      cached = spooled_resource_lookup_cached_dir(spooled,
                                                  published_out);
    }
    if (cached == NULL) {
      return 0;
    }
    size_t result = compressed ? cached->dir_compressed_len : cached->dir_len;
    return result;
  }
}

/** Return code for spooled_resource_flush_some */
typedef enum {
  SRFS_ERR = -1,
  SRFS_MORE = 0,
  SRFS_DONE
} spooled_resource_flush_status_t;

/** Flush some or all of the bytes from <b>spooled</b> onto <b>conn</b>.
 * Return SRFS_ERR on error, SRFS_MORE if there are more bytes to flush from
 * this spooled resource, or SRFS_DONE if we are done flushing this spooled
 * resource.
 */
static spooled_resource_flush_status_t
spooled_resource_flush_some(spooled_resource_t *spooled,
                            dir_connection_t *conn)
{
  if (spooled->spool_eagerly) {
    /* Spool_eagerly resources are sent all-at-once. */
    const uint8_t *body = NULL;
    size_t bodylen = 0;
    int r = spooled_resource_lookup_body(spooled,
                                         connection_dir_is_encrypted(conn),
                                         &body, &bodylen, NULL);
    if (r == -1 || body == NULL || bodylen == 0) {
      /* Absent objects count as "done". */
      return SRFS_DONE;
    }

    connection_dir_buf_add((const char*)body, bodylen, conn, 0);

    return SRFS_DONE;
  } else {
    cached_dir_t *cached = spooled->cached_dir_ref;
    consensus_cache_entry_t *cce = spooled->consensus_cache_entry;
    if (cached == NULL && cce == NULL) {
      /* The cached_dir_t hasn't been materialized yet. So let's look it up. */
      cached = spooled->cached_dir_ref =
        spooled_resource_lookup_cached_dir(spooled, NULL);
      if (!cached) {
        /* Absent objects count as done. */
        return SRFS_DONE;
      }

      tor_assert_nonfatal(spooled->cached_dir_offset == 0);
    }

    if (BUG(!cached && !cce))
      return SRFS_DONE;

    int64_t total_len;
    const char *ptr;
    if (cached) {
      total_len = cached->dir_compressed_len;
      ptr = cached->dir_compressed;
    } else {
      total_len = spooled->cce_len;
      ptr = (const char *)spooled->cce_body;
    }
    /* How many bytes left to flush? */
    int64_t remaining;
    remaining = total_len - spooled->cached_dir_offset;
    if (BUG(remaining < 0))
      return SRFS_ERR;
    ssize_t bytes = (ssize_t) MIN(DIRSERV_CACHED_DIR_CHUNK_SIZE, remaining);

    connection_dir_buf_add(ptr + spooled->cached_dir_offset,
                           bytes, conn, 0);

    spooled->cached_dir_offset += bytes;
    if (spooled->cached_dir_offset >= (off_t)total_len) {
      return SRFS_DONE;
    } else {
      return SRFS_MORE;
    }
  }
}

/** Helper: find the cached_dir_t for a spooled_resource_t, for
 * sending it to the client. Set *<b>published_out</b>, if provided,
 * to the published time of the cached_dir_t.
 *
 * DOES NOT increase the reference count on the result. Callers must do that
 * themselves if they mean to hang on to it.
 */
static cached_dir_t *
spooled_resource_lookup_cached_dir(const spooled_resource_t *spooled,
                                   time_t *published_out)
{
  tor_assert(spooled->spool_eagerly == 0);
  cached_dir_t *d = lookup_cached_dir_by_fp(spooled->digest);
  if (d != NULL) {
    if (published_out)
      *published_out = d->published;
  }
  return d;
}

/** Helper: Look up the body for an eagerly-served spooled_resource. If
 * <b>conn_is_encrypted</b> is false, don't look up any resource that
 * shouldn't be sent over an unencrypted connection. On success, set
 * <b>body_out</b>, <b>size_out</b>, and <b>published_out</b> to refer
 * to the resource's body, size, and publication date, and return 0.
 * On failure return -1. */
static int
spooled_resource_lookup_body(const spooled_resource_t *spooled,
                             int conn_is_encrypted,
                             const uint8_t **body_out,
                             size_t *size_out,
                             time_t *published_out)
{
  tor_assert(spooled->spool_eagerly == 1);

  const signed_descriptor_t *sd = NULL;

  switch (spooled->spool_source) {
    case DIR_SPOOL_EXTRA_BY_FP: {
      sd = get_signed_descriptor_by_fp(spooled->digest, 1);
      break;
    }
    case DIR_SPOOL_SERVER_BY_FP: {
      sd = get_signed_descriptor_by_fp(spooled->digest, 0);
      break;
    }
    case DIR_SPOOL_SERVER_BY_DIGEST: {
      sd = router_get_by_descriptor_digest((const char *)spooled->digest);
      break;
    }
    case DIR_SPOOL_EXTRA_BY_DIGEST: {
      sd = extrainfo_get_by_descriptor_digest((const char *)spooled->digest);
      break;
    }
    case DIR_SPOOL_MICRODESC: {
      microdesc_t *md = microdesc_cache_lookup_by_digest256(
                                  get_microdesc_cache(),
                                  (const char *)spooled->digest);
      if (! md || ! md->body) {
        return -1;
      }
      *body_out = (const uint8_t *)md->body;
      *size_out = md->bodylen;
      if (published_out)
        *published_out = TIME_MAX;
      return 0;
    }
    case DIR_SPOOL_NETWORKSTATUS:
    case DIR_SPOOL_CONSENSUS_CACHE_ENTRY:
    default:
      /* LCOV_EXCL_START */
      tor_assert_nonfatal_unreached();
      return -1;
      /* LCOV_EXCL_STOP */
  }

  /* If we get here, then we tried to set "sd" to a signed_descriptor_t. */

  if (sd == NULL) {
    return -1;
  }
  if (sd->send_unencrypted == 0 && ! conn_is_encrypted) {
    /* we did this check once before (so we could have an accurate size
     * estimate and maybe send a 404 if somebody asked for only bridges on
     * a connection), but we need to do it again in case a previously
     * unknown bridge descriptor has shown up between then and now. */
    return -1;
  }
  *body_out = (const uint8_t *) signed_descriptor_get_body(sd);
  *size_out = sd->signed_descriptor_len;
  if (published_out)
    *published_out = sd->published_on;
  return 0;
}

/** Given a fingerprint <b>fp</b> which is either set if we're looking for a
 * v2 status, or zeroes if we're looking for a v3 status, or a NUL-padded
 * flavor name if we want a flavored v3 status, return a pointer to the
 * appropriate cached dir object, or NULL if there isn't one available. */
static cached_dir_t *
lookup_cached_dir_by_fp(const uint8_t *fp)
{
  cached_dir_t *d = NULL;
  if (tor_digest_is_zero((const char *)fp) && cached_consensuses) {
    d = strmap_get(cached_consensuses, "ns");
  } else if (memchr(fp, '\0', DIGEST_LEN) && cached_consensuses) {
    /* this here interface is a nasty hack: we're shoving a flavor into
     * a digest field. */
    d = strmap_get(cached_consensuses, (const char *)fp);
  }
  return d;
}

/** Try to guess the number of bytes that will be needed to send the
 * spooled objects for <b>conn</b>'s outgoing spool. In the process,
 * remove every element of the spool that refers to an absent object, or
 * which was published earlier than <b>cutoff</b>. Set *<b>size_out</b>
 * to the number of bytes, and *<b>n_expired_out</b> to the number of
 * objects removed for being too old. */
void
dirserv_spool_remove_missing_and_guess_size(dir_connection_t *conn,
                                            time_t cutoff,
                                            int compression,
                                            size_t *size_out,
                                            int *n_expired_out)
{
  if (BUG(!conn))
    return;

  smartlist_t *spool = conn->spool;
  if (!spool) {
    if (size_out)
      *size_out = 0;
    if (n_expired_out)
      *n_expired_out = 0;
    return;
  }
  int n_expired = 0;
  uint64_t total = 0;
  SMARTLIST_FOREACH_BEGIN(spool, spooled_resource_t *, spooled) {
    time_t published = TIME_MAX;
    size_t sz = spooled_resource_estimate_size(spooled, conn,
                                               compression, &published);
    if (published < cutoff) {
      ++n_expired;
      SMARTLIST_DEL_CURRENT(spool, spooled);
      spooled_resource_free(spooled);
    } else if (sz == 0) {
      SMARTLIST_DEL_CURRENT(spool, spooled);
      spooled_resource_free(spooled);
    } else {
      total += sz;
    }
  } SMARTLIST_FOREACH_END(spooled);

  if (size_out) {
    *size_out = (total > SIZE_MAX) ? SIZE_MAX : (size_t)total;
  }
  if (n_expired_out)
    *n_expired_out = n_expired;
}

/** Helper: used to sort a connection's spool. */
static int
dirserv_spool_sort_comparison_(const void **a_, const void **b_)
{
  const spooled_resource_t *a = *a_;
  const spooled_resource_t *b = *b_;
  return fast_memcmp(a->digest, b->digest, sizeof(a->digest));
}

/** Sort all the entries in <b>conn</b> by digest. */
void
dirserv_spool_sort(dir_connection_t *conn)
{
  if (conn->spool == NULL)
    return;
  smartlist_sort(conn->spool, dirserv_spool_sort_comparison_);
}

/** Return the cache-info for identity fingerprint <b>fp</b>, or
 * its extra-info document if <b>extrainfo</b> is true. Return
 * NULL if not found. */
static const signed_descriptor_t *
get_signed_descriptor_by_fp(const uint8_t *fp, int extrainfo)
{
  if (router_digest_is_me((const char *)fp)) {
    if (extrainfo)
      return &(router_get_my_extrainfo()->cache_info);
    else
      return &(router_get_my_routerinfo()->cache_info);
  } else {
    const routerinfo_t *ri = router_get_by_id_digest((const char *)fp);
    if (ri) {
      if (extrainfo)
        return extrainfo_get_by_descriptor_digest(
                                     ri->cache_info.extra_info_digest);
      else
        return &ri->cache_info;
    }
  }
  return NULL;
}

/** When we're spooling data onto our outbuf, add more whenever we dip
 * below this threshold. */
#define DIRSERV_BUFFER_MIN 16384

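/* That is: connection_dirserv_flushed_some() below only adds more spooled
 * data once less than 16 KiB of directory data remains queued on the outbuf,
 * keeping per-connection buffering bounded while large replies drain. */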

/**
 * Called whenever we have flushed some directory data in state
 * SERVER_WRITING, or whenever we want to fill the buffer with initial
 * directory data (so that subsequent writes will occur, and trigger this
 * function again.)
 *
 * Return 0 on success, and -1 on failure.
 */
int
connection_dirserv_flushed_some(dir_connection_t *conn)
{
  tor_assert(conn->base_.state == DIR_CONN_STATE_SERVER_WRITING);
  if (conn->spool == NULL)
    return 0;

  while (connection_get_outbuf_len(TO_CONN(conn)) < DIRSERV_BUFFER_MIN &&
         smartlist_len(conn->spool)) {
    spooled_resource_t *spooled =
      smartlist_get(conn->spool, smartlist_len(conn->spool)-1);
    spooled_resource_flush_status_t status;
    status = spooled_resource_flush_some(spooled, conn);
    if (status == SRFS_ERR) {
      return -1;
    } else if (status == SRFS_MORE) {
      return 0;
    }
    tor_assert(status == SRFS_DONE);

    /* If we're here, we're done flushing this resource. */
    tor_assert(smartlist_pop_last(conn->spool) == spooled);
    spooled_resource_free(spooled);
  }

  if (smartlist_len(conn->spool) > 0) {
    /* We're still spooling something. */
    return 0;
  }

  /* If we get here, we're done. */
  smartlist_free(conn->spool);
  conn->spool = NULL;
  if (conn->compress_state) {
    /* Flush the compression state: there could be more bytes pending in there,
     * and we don't want to omit bytes. */
    connection_buf_add_compress("", 0, conn, 1);
    tor_compress_free(conn->compress_state);
    conn->compress_state = NULL;
  }

  return 0;
}

/** Remove every element from <b>conn</b>'s outgoing spool, and delete
 * the spool. */
void
dir_conn_clear_spool(dir_connection_t *conn)
{
  if (!conn || ! conn->spool)
    return;
  SMARTLIST_FOREACH(conn->spool, spooled_resource_t *, s,
                    spooled_resource_free(s));
  smartlist_free(conn->spool);
  conn->spool = NULL;
}

/** Release all storage used by the directory server. */
void
dirserv_free_all(void)
{
  strmap_free(cached_consensuses, free_cached_dir_);
  cached_consensuses = NULL;
}