/* Copyright (c) 2020-2021, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \brief Test lib/metrics and feature/metrics functionalities
 **/
#define CONNECTION_PRIVATE
#define MAINLOOP_PRIVATE
#define METRICS_STORE_ENTRY_PRIVATE

#include "test/test.h"
#include "test/test_helpers.h"
#include "test/log_test_helpers.h"

#include "app/config/config.h"

#include "core/mainloop/connection.h"
#include "core/mainloop/mainloop.h"
#include "core/or/connection_st.h"
#include "core/or/policies.h"
#include "core/or/port_cfg_st.h"

#include "feature/metrics/metrics.h"

#include "lib/encoding/confline.h"
#include "lib/metrics/metrics_store.h"

#define TEST_METRICS_ENTRY_NAME "entryA"
#define TEST_METRICS_ENTRY_HELP "Description of entryA"
#define TEST_METRICS_ENTRY_LABEL_1 "label=\"farfadet\""
#define TEST_METRICS_ENTRY_LABEL_2 "label=\"ponki\""

#define TEST_METRICS_HIST_ENTRY_NAME "test_hist_entry"
#define TEST_METRICS_HIST_ENTRY_HELP "Description of test_hist_entry"
static void
set_metrics_port(or_options_t *options)
{
  const char *port = "MetricsPort 9035"; /* Default to 127.0.0.1 */
  const char *policy = "MetricsPortPolicy accept 1.2.3.4";

  config_get_lines(port, &options->MetricsPort_lines, 0);
  config_get_lines(policy, &options->MetricsPortPolicy, 0);

  /* Parse and validate policy. */
  policies_parse_from_options(options);
}
static void
test_config(void *arg)
{
  char *err_msg = NULL;
  tor_addr_t addr;
  smartlist_t *ports = smartlist_new();
  or_options_t *options = get_options_mutable();

  (void) arg;

  set_metrics_port(options);

  int ret = metrics_parse_ports(options, ports, &err_msg);
  tt_int_op(ret, OP_EQ, 0);
  tt_int_op(smartlist_len(ports), OP_EQ, 1);

  /* Validate the configured port. */
  const port_cfg_t *cfg = smartlist_get(ports, 0);
  tt_assert(tor_addr_eq_ipv4h(&cfg->addr, 0x7f000001));
  tt_int_op(cfg->port, OP_EQ, 9035);
  tt_int_op(cfg->type, OP_EQ, CONN_TYPE_METRICS_LISTENER);

  /* Address of the policy should be permitted. */
  tor_addr_from_ipv4h(&addr, 0x01020304); /* 1.2.3.4 */
  ret = metrics_policy_permits_address(&addr);
  tt_int_op(ret, OP_EQ, true);

  /* Anything else, should not. */
  tor_addr_from_ipv4h(&addr, 0x01020305); /* 1.2.3.5 */
  ret = metrics_policy_permits_address(&addr);
  tt_int_op(ret, OP_EQ, false);

 done:
  SMARTLIST_FOREACH(ports, port_cfg_t *, c, port_cfg_free(c));
  smartlist_free(ports);
  or_options_free(options);
  tor_free(err_msg);
}
static char _c_buf[256];

#define CONTAINS(conn, msg) \
  do { \
    tt_int_op(buf_datalen(conn->outbuf), OP_EQ, (strlen(msg))); \
    memset(_c_buf, 0, sizeof(_c_buf)); \
    buf_get_bytes(conn->outbuf, _c_buf, (strlen(msg))); \
    tt_str_op(_c_buf, OP_EQ, (msg)); \
    tt_int_op(buf_datalen(conn->outbuf), OP_EQ, 0); \
  } while (0)

#define WRITE(conn, msg) \
  buf_add(conn->inbuf, (msg), (strlen(msg)));

/* Free the previous conn object if any and allocate a new connection. In
 * order to be allowed, set its address to 1.2.3.4 as per the policy. */
#define NEW_ALLOWED_CONN() \
  do { \
    close_closeable_connections(); \
    conn = connection_new(CONN_TYPE_METRICS, AF_INET); \
    tor_addr_from_ipv4h(&conn->addr, 0x01020304); \
  } while (0)
static void
test_connection(void *arg)
{
  int ret;
  connection_t *conn = NULL;
  or_options_t *options = get_options_mutable();

  (void) arg;

  /* Notice that in this test, we will allocate a new connection at every test
   * case. This is because the metrics_connection_process_inbuf() marks for
   * close the connection in case of an error and thus we can't call again an
   * inbuf process function on a marked for close connection. */

  tor_init_connection_lists();

  set_metrics_port(options);

  /* Set 1.2.3.5 IP, we should get rejected. */
  NEW_ALLOWED_CONN();
  tor_addr_from_ipv4h(&conn->addr, 0x01020305);
  ret = metrics_connection_process_inbuf(conn);
  tt_int_op(ret, OP_EQ, -1);

  /* No HTTP request yet. */
  NEW_ALLOWED_CONN();
  ret = metrics_connection_process_inbuf(conn);
  tt_int_op(ret, OP_EQ, 0);
  connection_free_minimal(conn);

  /* Bad request. */
  NEW_ALLOWED_CONN();
  WRITE(conn, "HTTP 4.7\r\n\r\n");
  ret = metrics_connection_process_inbuf(conn);
  tt_int_op(ret, OP_EQ, -1);
  CONTAINS(conn, "HTTP/1.0 400 Bad Request\r\n\r\n");

  /* Path not found. */
  NEW_ALLOWED_CONN();
  WRITE(conn, "GET /badpath HTTP/1.0\r\n\r\n");
  ret = metrics_connection_process_inbuf(conn);
  tt_int_op(ret, OP_EQ, -1);
  CONTAINS(conn, "HTTP/1.0 404 Not Found\r\n\r\n");

  /* Method not allowed. */
  NEW_ALLOWED_CONN();
  WRITE(conn, "POST /something HTTP/1.0\r\n\r\n");
  ret = metrics_connection_process_inbuf(conn);
  tt_int_op(ret, OP_EQ, -1);
  CONTAINS(conn, "HTTP/1.0 405 Method Not Allowed\r\n\r\n");

  /* Ask for metrics. The content should be above 0. We don't test the
   * validity of the returned content but it is certainly not an error. */
  NEW_ALLOWED_CONN();
  WRITE(conn, "GET /metrics HTTP/1.0\r\n\r\n");
  ret = metrics_connection_process_inbuf(conn);
  tt_int_op(ret, OP_EQ, 0);
  tt_int_op(buf_datalen(conn->outbuf), OP_GT, 0);

 done:
  or_options_free(options);
  connection_free_minimal(conn);
}
static void
test_prometheus(void *arg)
{
  metrics_store_t *store = NULL;
  metrics_store_entry_t *entry = NULL;
  buf_t *buf = buf_new();
  char *output = NULL;

  (void) arg;

  /* Fresh new store. No entries. */
  store = metrics_store_new();

  /* Add entry and validate its content. */
  entry = metrics_store_add(store, METRICS_TYPE_COUNTER,
                            TEST_METRICS_ENTRY_NAME,
                            TEST_METRICS_ENTRY_HELP,
                            0, NULL);
  metrics_store_entry_add_label(entry, TEST_METRICS_ENTRY_LABEL_1);

  static const char *expected =
    "# HELP " TEST_METRICS_ENTRY_NAME " " TEST_METRICS_ENTRY_HELP "\n"
    "# TYPE " TEST_METRICS_ENTRY_NAME " counter\n"
    TEST_METRICS_ENTRY_NAME "{" TEST_METRICS_ENTRY_LABEL_1 "} 0\n";

  metrics_store_get_output(METRICS_FORMAT_PROMETHEUS, store, buf);
  output = buf_extract(buf, NULL);
  tt_str_op(expected, OP_EQ, output);

 done:
  buf_free(buf);
  tor_free(output);
  metrics_store_free(store);
}
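/* For reference: with the TEST_METRICS_ENTRY_* defines at the top of this
 * file, the `expected` string in test_prometheus() above concatenates into
 * the following Prometheus exposition text:
 *
 *   # HELP entryA Description of entryA
 *   # TYPE entryA counter
 *   entryA{label="farfadet"} 0
 */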
static void
test_prometheus_histogram(void *arg)
{
  metrics_store_t *store = NULL;
  metrics_store_entry_t *entry = NULL;
  buf_t *buf = buf_new();
  const int64_t buckets[] = { 10, 20, 3000 };
  char *output = NULL;

  (void) arg;

  /* Fresh new store. No entries. */
  store = metrics_store_new();

  /* Add a histogram entry and validate its content. */
  entry = metrics_store_add(store, METRICS_TYPE_HISTOGRAM,
                            TEST_METRICS_HIST_ENTRY_NAME,
                            TEST_METRICS_HIST_ENTRY_HELP,
                            ARRAY_LENGTH(buckets), buckets);
  metrics_store_entry_add_label(entry, TEST_METRICS_ENTRY_LABEL_1);

  static const char *expected =
    "# HELP " TEST_METRICS_HIST_ENTRY_NAME " "
    TEST_METRICS_HIST_ENTRY_HELP "\n"
    "# TYPE " TEST_METRICS_HIST_ENTRY_NAME " histogram\n"
    TEST_METRICS_HIST_ENTRY_NAME "_bucket{"
    TEST_METRICS_ENTRY_LABEL_1 ",le=\"10.00\"} 0\n"
    TEST_METRICS_HIST_ENTRY_NAME "_bucket{"
    TEST_METRICS_ENTRY_LABEL_1 ",le=\"20.00\"} 0\n"
    TEST_METRICS_HIST_ENTRY_NAME "_bucket{"
    TEST_METRICS_ENTRY_LABEL_1 ",le=\"3000.00\"} 0\n"
    TEST_METRICS_HIST_ENTRY_NAME "_bucket{"
    TEST_METRICS_ENTRY_LABEL_1 ",le=\"+Inf\"} 0\n"
    TEST_METRICS_HIST_ENTRY_NAME "_sum{" TEST_METRICS_ENTRY_LABEL_1 "} 0\n"
    TEST_METRICS_HIST_ENTRY_NAME "_count{" TEST_METRICS_ENTRY_LABEL_1 "} 0\n";

  metrics_store_get_output(METRICS_FORMAT_PROMETHEUS, store, buf);
  output = buf_extract(buf, NULL);
  tt_str_op(expected, OP_EQ, output);

 done:
  buf_free(buf);
  tor_free(output);
  metrics_store_free(store);
}
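/* For reference: the histogram `expected` string above concatenates into one
 * "_bucket" series per configured bucket plus the implicit "+Inf" bucket,
 * followed by the "_sum" and "_count" series:
 *
 *   # HELP test_hist_entry Description of test_hist_entry
 *   # TYPE test_hist_entry histogram
 *   test_hist_entry_bucket{label="farfadet",le="10.00"} 0
 *   test_hist_entry_bucket{label="farfadet",le="20.00"} 0
 *   test_hist_entry_bucket{label="farfadet",le="3000.00"} 0
 *   test_hist_entry_bucket{label="farfadet",le="+Inf"} 0
 *   test_hist_entry_sum{label="farfadet"} 0
 *   test_hist_entry_count{label="farfadet"} 0
 */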
static void
test_store(void *arg)
{
  metrics_store_t *store = NULL;
  metrics_store_entry_t *entry = NULL;
  const int64_t buckets[] = { 10, 20, 3000 };
  const size_t bucket_count = ARRAY_LENGTH(buckets);

  (void) arg;

  /* Fresh new store. No entries. */
  store = metrics_store_new();
  tt_assert(!metrics_store_get_all(store, TEST_METRICS_ENTRY_NAME));

  /* Add entry and validate its content. */
  entry = metrics_store_add(store, METRICS_TYPE_COUNTER,
                            TEST_METRICS_ENTRY_NAME,
                            TEST_METRICS_ENTRY_HELP, 0, NULL);
  tt_int_op(entry->type, OP_EQ, METRICS_TYPE_COUNTER);
  tt_str_op(entry->name, OP_EQ, TEST_METRICS_ENTRY_NAME);
  tt_str_op(entry->help, OP_EQ, TEST_METRICS_ENTRY_HELP);
  tt_uint_op(entry->u.counter.value, OP_EQ, 0);

  /* Access the entry. */
  tt_assert(metrics_store_get_all(store, TEST_METRICS_ENTRY_NAME));

  /* Add a label to the entry to make it unique. */
  metrics_store_entry_add_label(entry, TEST_METRICS_ENTRY_LABEL_1);
  tt_int_op(metrics_store_entry_has_label(entry, TEST_METRICS_ENTRY_LABEL_1),
            OP_EQ, true);

  /* Update entry's value. */
  metrics_store_entry_update(entry, 42);
  tt_int_op(metrics_store_entry_get_value(entry), OP_EQ, 42);
  metrics_store_entry_update(entry, 42);
  tt_int_op(metrics_store_entry_get_value(entry), OP_EQ, 84);
  metrics_store_entry_reset(entry);
  tt_int_op(metrics_store_entry_get_value(entry), OP_EQ, 0);

  /* Add a new entry of same name but different label. */
  /* Add entry and validate its content. */
  entry = metrics_store_add(store, METRICS_TYPE_COUNTER,
                            TEST_METRICS_ENTRY_NAME,
                            TEST_METRICS_ENTRY_HELP, 0, NULL);
  metrics_store_entry_add_label(entry, TEST_METRICS_ENTRY_LABEL_2);

  /* Make sure _both_ entries are there. */
  const smartlist_t *entries =
    metrics_store_get_all(store, TEST_METRICS_ENTRY_NAME);
  tt_int_op(smartlist_len(entries), OP_EQ, 2);

  /* Add a histogram entry and validate its content. */
  entry = metrics_store_add(store, METRICS_TYPE_HISTOGRAM,
                            TEST_METRICS_HIST_ENTRY_NAME,
                            TEST_METRICS_HIST_ENTRY_HELP,
                            bucket_count, buckets);
  tt_int_op(entry->type, OP_EQ, METRICS_TYPE_HISTOGRAM);
  tt_str_op(entry->name, OP_EQ, TEST_METRICS_HIST_ENTRY_NAME);
  tt_str_op(entry->help, OP_EQ, TEST_METRICS_HIST_ENTRY_HELP);
  tt_uint_op(entry->u.histogram.bucket_count, OP_EQ, bucket_count);

  for (size_t i = 0; i < bucket_count; ++i) {
    tt_uint_op(entry->u.histogram.buckets[i].bucket, OP_EQ, buckets[i]);
    tt_uint_op(entry->u.histogram.buckets[i].value, OP_EQ, 0);
  }

  /* Access the entry. */
  tt_assert(metrics_store_get_all(store, TEST_METRICS_HIST_ENTRY_NAME));

  /* Record various observations. */
  metrics_store_hist_entry_update(entry, 3, 11);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 10), OP_EQ, 0);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 20), OP_EQ, 3);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 3000), OP_EQ, 3);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 3000), OP_EQ, 3);
  tt_int_op(metrics_store_hist_entry_get_count(entry), OP_EQ, 3);
  tt_int_op(metrics_store_hist_entry_get_sum(entry), OP_EQ, 11);

  metrics_store_hist_entry_update(entry, 1, 42);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 10), OP_EQ, 0);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 20), OP_EQ, 3);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 3000), OP_EQ, 4);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 3000), OP_EQ, 4);
  tt_int_op(metrics_store_hist_entry_get_count(entry), OP_EQ, 4);
  tt_int_op(metrics_store_hist_entry_get_sum(entry), OP_EQ, 53);

  /* Ensure this resets all buckets back to 0. */
  metrics_store_entry_reset(entry);
  for (size_t i = 0; i < bucket_count; ++i) {
    tt_uint_op(entry->u.histogram.buckets[i].bucket, OP_EQ, buckets[i]);
    tt_uint_op(entry->u.histogram.buckets[i].value, OP_EQ, 0);
  }

  /* tt_int_op assigns the third argument to a variable of type long, which
   * overflows on some platforms (e.g. on some 32-bit systems). We disable
   * these checks for those platforms. */
#if LONG_MAX >= INT64_MAX
  metrics_store_hist_entry_update(entry, 1, INT64_MAX - 13);
  tt_int_op(metrics_store_hist_entry_get_sum(entry), OP_EQ, INT64_MAX - 13);
  metrics_store_hist_entry_update(entry, 1, 13);
  tt_int_op(metrics_store_hist_entry_get_sum(entry), OP_EQ, INT64_MAX);
  /* Uh-oh, the sum of all observations is now greater than INT64_MAX. Make
   * sure we reset the entry instead of overflowing the sum. */
  metrics_store_hist_entry_update(entry, 1, 1);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 10), OP_EQ, 1);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 20), OP_EQ, 1);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 3000), OP_EQ, 1);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 3000), OP_EQ, 1);
  tt_int_op(metrics_store_hist_entry_get_count(entry), OP_EQ, 1);
  tt_int_op(metrics_store_hist_entry_get_sum(entry), OP_EQ, 1);
#endif

#if LONG_MIN <= INT64_MIN
  metrics_store_entry_reset(entry);
  /* In practice, we're not going to have negative observations (as we only
   * use histograms for timings, which are always positive), but technically
   * prometheus _does_ support negative observations. */
  metrics_store_hist_entry_update(entry, 1, INT64_MIN + 13);
  tt_int_op(metrics_store_hist_entry_get_sum(entry), OP_EQ, INT64_MIN + 13);
  metrics_store_hist_entry_update(entry, 1, -13);
  tt_int_op(metrics_store_hist_entry_get_sum(entry), OP_EQ, INT64_MIN);
  /* Uh-oh, the sum of all observations is now less than INT64_MIN. Make
   * sure we reset the entry instead of underflowing the sum. */
  metrics_store_hist_entry_update(entry, 1, -1);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 10), OP_EQ, 1);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 20), OP_EQ, 1);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 3000), OP_EQ, 1);
  tt_int_op(metrics_store_hist_entry_get_value(entry, 3000), OP_EQ, 1);
  tt_int_op(metrics_store_hist_entry_get_count(entry), OP_EQ, 1);
  tt_int_op(metrics_store_hist_entry_get_sum(entry), OP_EQ, -1);
#endif

 done:
  metrics_store_free(store);
}
struct testcase_t metrics_tests[] = {

  { "config", test_config, TT_FORK, NULL, NULL },
  { "connection", test_connection, TT_FORK, NULL, NULL },
  { "prometheus", test_prometheus, TT_FORK, NULL, NULL },
  { "prometheus_histogram", test_prometheus_histogram, TT_FORK, NULL, NULL },
  { "store", test_store, TT_FORK, NULL, NULL },

  END_OF_TESTCASES
};