Linux 6.13-rc4
[linux.git] / fs / bcachefs / time_stats.c
blob3fe82757f93aacc83c4989b6dfc0b8b4af23acd2
1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/jiffies.h>
4 #include <linux/module.h>
5 #include <linux/percpu.h>
6 #include <linux/preempt.h>
7 #include <linux/time.h>
8 #include <linux/spinlock.h>
10 #include "eytzinger.h"
11 #include "time_stats.h"
13 static const struct time_unit time_units[] = {
14 { "ns", 1 },
15 { "us", NSEC_PER_USEC },
16 { "ms", NSEC_PER_MSEC },
17 { "s", NSEC_PER_SEC },
18 { "m", (u64) NSEC_PER_SEC * 60},
19 { "h", (u64) NSEC_PER_SEC * 3600},
20 { "d", (u64) NSEC_PER_SEC * 3600 * 24},
21 { "w", (u64) NSEC_PER_SEC * 3600 * 24 * 7},
22 { "y", (u64) NSEC_PER_SEC * ((3600 * 24 * 7 * 365) + (3600 * (24 / 4) * 7))}, /* 365.25d */
23 { "eon", U64_MAX },
26 const struct time_unit *bch2_pick_time_units(u64 ns)
28 const struct time_unit *u;
30 for (u = time_units;
31 u + 1 < time_units + ARRAY_SIZE(time_units) &&
32 ns >= u[1].nsecs << 1;
33 u++)
36 return u;
39 static void quantiles_update(struct quantiles *q, u64 v)
41 unsigned i = 0;
43 while (i < ARRAY_SIZE(q->entries)) {
44 struct quantile_entry *e = q->entries + i;
46 if (unlikely(!e->step)) {
47 e->m = v;
48 e->step = max_t(unsigned, v / 2, 1024);
49 } else if (e->m > v) {
50 e->m = e->m >= e->step
51 ? e->m - e->step
52 : 0;
53 } else if (e->m < v) {
54 e->m = e->m + e->step > e->m
55 ? e->m + e->step
56 : U32_MAX;
59 if ((e->m > v ? e->m - v : v - e->m) < e->step)
60 e->step = max_t(unsigned, e->step / 2, 1);
62 if (v >= e->m)
63 break;
65 i = eytzinger0_child(i, v > e->m);
69 static inline void time_stats_update_one(struct bch2_time_stats *stats,
70 u64 start, u64 end)
72 u64 duration, freq;
73 bool initted = stats->last_event != 0;
75 if (time_after64(end, start)) {
76 struct quantiles *quantiles = time_stats_to_quantiles(stats);
78 duration = end - start;
79 mean_and_variance_update(&stats->duration_stats, duration);
80 mean_and_variance_weighted_update(&stats->duration_stats_weighted,
81 duration, initted, TIME_STATS_MV_WEIGHT);
82 stats->max_duration = max(stats->max_duration, duration);
83 stats->min_duration = min(stats->min_duration, duration);
84 stats->total_duration += duration;
86 if (quantiles)
87 quantiles_update(quantiles, duration);
90 if (stats->last_event && time_after64(end, stats->last_event)) {
91 freq = end - stats->last_event;
92 mean_and_variance_update(&stats->freq_stats, freq);
93 mean_and_variance_weighted_update(&stats->freq_stats_weighted,
94 freq, initted, TIME_STATS_MV_WEIGHT);
95 stats->max_freq = max(stats->max_freq, freq);
96 stats->min_freq = min(stats->min_freq, freq);
99 stats->last_event = end;
102 void __bch2_time_stats_clear_buffer(struct bch2_time_stats *stats,
103 struct time_stat_buffer *b)
105 for (struct time_stat_buffer_entry *i = b->entries;
106 i < b->entries + ARRAY_SIZE(b->entries);
107 i++)
108 time_stats_update_one(stats, i->start, i->end);
109 b->nr = 0;
/*
 * Locked wrapper around __bch2_time_stats_clear_buffer(): takes
 * stats->lock with interrupts saved, flushes @b, and releases.
 * noinline keeps this slow path out of the hot update path.
 */
static noinline void time_stats_clear_buffer(struct bch2_time_stats *stats,
					     struct time_stat_buffer *b)
{
	unsigned long flags;

	spin_lock_irqsave(&stats->lock, flags);
	__bch2_time_stats_clear_buffer(stats, b);
	spin_unlock_irqrestore(&stats->lock, flags);
}
122 void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end)
124 unsigned long flags;
126 if (!stats->buffer) {
127 spin_lock_irqsave(&stats->lock, flags);
128 time_stats_update_one(stats, start, end);
130 if (mean_and_variance_weighted_get_mean(stats->freq_stats_weighted, TIME_STATS_MV_WEIGHT) < 32 &&
131 stats->duration_stats.n > 1024)
132 stats->buffer =
133 alloc_percpu_gfp(struct time_stat_buffer,
134 GFP_ATOMIC);
135 spin_unlock_irqrestore(&stats->lock, flags);
136 } else {
137 struct time_stat_buffer *b;
139 preempt_disable();
140 b = this_cpu_ptr(stats->buffer);
142 BUG_ON(b->nr >= ARRAY_SIZE(b->entries));
143 b->entries[b->nr++] = (struct time_stat_buffer_entry) {
144 .start = start,
145 .end = end
148 if (unlikely(b->nr == ARRAY_SIZE(b->entries)))
149 time_stats_clear_buffer(stats, b);
150 preempt_enable();
154 void bch2_time_stats_reset(struct bch2_time_stats *stats)
156 spin_lock_irq(&stats->lock);
157 unsigned offset = offsetof(struct bch2_time_stats, min_duration);
158 memset((void *) stats + offset, 0, sizeof(*stats) - offset);
160 if (stats->buffer) {
161 int cpu;
162 for_each_possible_cpu(cpu)
163 per_cpu_ptr(stats->buffer, cpu)->nr = 0;
165 spin_unlock_irq(&stats->lock);
/*
 * Release the percpu batch buffer, if one was ever allocated
 * (free_percpu(NULL) is a no-op).  Does not free @stats itself.
 */
void bch2_time_stats_exit(struct bch2_time_stats *stats)
{
	free_percpu(stats->buffer);
}
173 void bch2_time_stats_init(struct bch2_time_stats *stats)
175 memset(stats, 0, sizeof(*stats));
176 stats->min_duration = U64_MAX;
177 stats->min_freq = U64_MAX;
178 spin_lock_init(&stats->lock);