/*
 * trace2/tr2_ctr.c -- Trace2 global counters.
 * (extracted from git.git, "The eleventh batch";
 * blob ee17bfa86b401b675b332d7990514806371b2839)
 */
1 #include "git-compat-util.h"
2 #include "trace2/tr2_tgt.h"
3 #include "trace2/tr2_tls.h"
4 #include "trace2/tr2_ctr.h"
6 /*
7 * A global counter block to aggregate values from the partial sums
8 * from each thread.
9 */
10 static struct tr2_counter_block final_counter_block; /* access under tr2tls_mutex */
13 * Define metadata for each global counter.
15 * This array must match the "enum trace2_counter_id" and the values
16 * in "struct tr2_counter_block.counter[*]".
18 static struct tr2_counter_metadata tr2_counter_metadata[TRACE2_NUMBER_OF_COUNTERS] = {
19 [TRACE2_COUNTER_ID_TEST1] = {
20 .category = "test",
21 .name = "test1",
22 .want_per_thread_events = 0,
24 [TRACE2_COUNTER_ID_TEST2] = {
25 .category = "test",
26 .name = "test2",
27 .want_per_thread_events = 1,
29 [TRACE2_COUNTER_ID_PACKED_REFS_JUMPS] = {
30 .category = "packed-refs",
31 .name = "jumps_made",
32 .want_per_thread_events = 0,
34 [TRACE2_COUNTER_ID_REFTABLE_RESEEKS] = {
35 .category = "reftable",
36 .name = "reseeks_made",
37 .want_per_thread_events = 0,
39 [TRACE2_COUNTER_ID_FSYNC_WRITEOUT_ONLY] = {
40 .category = "fsync",
41 .name = "writeout-only",
42 .want_per_thread_events = 0,
44 [TRACE2_COUNTER_ID_FSYNC_HARDWARE_FLUSH] = {
45 .category = "fsync",
46 .name = "hardware-flush",
47 .want_per_thread_events = 0,
50 /* Add additional metadata before here. */
53 void tr2_counter_increment(enum trace2_counter_id cid, uint64_t value)
55 struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
56 struct tr2_counter *c = &ctx->counter_block.counter[cid];
58 c->value += value;
60 ctx->used_any_counter = 1;
61 if (tr2_counter_metadata[cid].want_per_thread_events)
62 ctx->used_any_per_thread_counter = 1;
65 void tr2_update_final_counters(void)
67 struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
68 enum trace2_counter_id cid;
70 if (!ctx->used_any_counter)
71 return;
74 * Access `final_counter_block` requires holding `tr2tls_mutex`.
75 * We assume that our caller is holding the lock.
78 for (cid = 0; cid < TRACE2_NUMBER_OF_COUNTERS; cid++) {
79 struct tr2_counter *c_final = &final_counter_block.counter[cid];
80 const struct tr2_counter *c = &ctx->counter_block.counter[cid];
82 c_final->value += c->value;
86 void tr2_emit_per_thread_counters(tr2_tgt_evt_counter_t *fn_apply)
88 struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
89 enum trace2_counter_id cid;
91 if (!ctx->used_any_per_thread_counter)
92 return;
95 * For each counter, if the counter wants per-thread events
96 * and this thread used it (the value is non-zero), emit it.
98 for (cid = 0; cid < TRACE2_NUMBER_OF_COUNTERS; cid++)
99 if (tr2_counter_metadata[cid].want_per_thread_events &&
100 ctx->counter_block.counter[cid].value)
101 fn_apply(&tr2_counter_metadata[cid],
102 &ctx->counter_block.counter[cid],
106 void tr2_emit_final_counters(tr2_tgt_evt_counter_t *fn_apply)
108 enum trace2_counter_id cid;
111 * Access `final_counter_block` requires holding `tr2tls_mutex`.
112 * We assume that our caller is holding the lock.
115 for (cid = 0; cid < TRACE2_NUMBER_OF_COUNTERS; cid++)
116 if (final_counter_block.counter[cid].value)
117 fn_apply(&tr2_counter_metadata[cid],
118 &final_counter_block.counter[cid],