// SPDX-License-Identifier: GPL-2.0
/*
 *  Floating proportions with flexible aging period
 *
 *   Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: Given different types of event, measure proportion
 * of each type of event over time. The proportions are measured with
 * exponentially decaying history to give smooth transitions. A formula
 * expressing proportion of event of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * where x_{i,j} is j's number of events in the i-th last time period and x_i
 * is the total number of events in the i-th last time period.
 *
 * Note that p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1.
 *
 * This formula can be straightforwardly computed by maintaining the
 * denominator (let's call it 'd') and for each event type its numerator
 * (let's call it 'n_j'). When an event of type 'j' happens, we simply need
 * to do:
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iterating over all event types, we instead shift the numerator of
 * event j lazily when someone asks for a proportion of event j or when an
 * event of type j occurs. This can be trivially implemented by remembering
 * the last period in which something happened with the proportion of type j.
 */
#include <linux/flex_proportions.h>
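/*
 * Illustrative sketch (example only, not part of the build): the aging and
 * lazy-shift rules from the comment above in plain single-threaded userspace
 * C.  All toy_* names are hypothetical; the real code below layers percpu
 * counters, locking and seqcount retries on top of this arithmetic.
 */
#if 0
#include <stdio.h>

struct toy_type {
        unsigned long events;           /* n_j, aged lazily */
        unsigned int period;            /* last period n_j was aged to */
};

static unsigned long toy_d;             /* denominator d, aged eagerly */
static unsigned int toy_period;         /* current global period */

static void toy_new_period(void)
{
        toy_d >>= 1;                    /* d /= 2; the n_j's are left alone */
        toy_period++;
}

/* Catch n_j up with the global period before it is used */
static void toy_reflect(struct toy_type *t)
{
        unsigned int missed = toy_period - t->period;

        t->events = missed < 64 ? t->events >> missed : 0;
        t->period = toy_period;
}

static void toy_event(struct toy_type *t)
{
        toy_reflect(t);
        t->events++;
        toy_d++;
}

int main(void)
{
        struct toy_type a = { 0 }, b = { 0 };
        int i;

        for (i = 0; i < 16; i++)
                toy_event(&a);
        toy_new_period();
        for (i = 0; i < 16; i++)
                toy_event(&b);
        toy_reflect(&a);
        /* Prints p_a = 8/24, p_b = 16/24: b's newer events weigh double */
        printf("p_a = %lu/%lu, p_b = %lu/%lu\n",
               a.events, toy_d, b.events, toy_d);
        return 0;
}
#endif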
int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
        int err;

        p->period = 0;
        /* Use 1 to avoid dealing with periods with 0 events... */
        err = percpu_counter_init(&p->events, 1, gfp);
        if (err)
                return err;
        seqcount_init(&p->sequence);
        return 0;
}
void fprop_global_destroy(struct fprop_global *p)
{
        percpu_counter_destroy(&p->events);
}
/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether declaring
 * further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
        s64 events;
        unsigned long flags;

        local_irq_save(flags);
        events = percpu_counter_sum(&p->events);
        /*
         * Don't do anything if there are no events.
         */
        if (events <= 1) {
                local_irq_restore(flags);
                return false;
        }
        write_seqcount_begin(&p->sequence);
        if (periods < 64)
                events -= events >> periods;
        /* Use addition to avoid losing events happening between sum and set */
        percpu_counter_add(&p->events, -events);
        p->period += periods;
        write_seqcount_end(&p->sequence);
        local_irq_restore(flags);

        return true;
}
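/*
 * Usage sketch (hypothetical names, not part of this file): aging is
 * typically driven from a periodic timer, which stops rearming itself once
 * fprop_new_period() reports that all events have been aged away.  This
 * mirrors how the writeback code drives its completion proportions.
 */
#if 0
static struct fprop_global my_fprop;

static void my_fprop_timer_fired(struct timer_list *t)
{
        /* One period elapsed; rearm only while proportions are defined */
        if (fprop_new_period(&my_fprop, 1))
                mod_timer(t, jiffies + HZ);
}
#endif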
/*
 * ---- SINGLE ----
 */

int fprop_local_init_single(struct fprop_local_single *pl)
{
        pl->events = 0;
        pl->period = 0;
        raw_spin_lock_init(&pl->lock);
        return 0;
}
void fprop_local_destroy_single(struct fprop_local_single *pl)
{
}
static void fprop_reflect_period_single(struct fprop_global *p,
                                        struct fprop_local_single *pl)
{
        unsigned int period = p->period;
        unsigned long flags;

        /* Fast path - period didn't change */
        if (pl->period == period)
                return;
        raw_spin_lock_irqsave(&pl->lock, flags);
        /* Someone updated pl->period while we were spinning? */
        if (pl->period >= period) {
                raw_spin_unlock_irqrestore(&pl->lock, flags);
                return;
        }
        /* Aging zeroed our fraction? */
        if (period - pl->period < BITS_PER_LONG)
                pl->events >>= period - pl->period;
        else
                pl->events = 0;
        pl->period = period;
        raw_spin_unlock_irqrestore(&pl->lock, flags);
}
/* Event of type pl happened */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
        fprop_reflect_period_single(p, pl);
        pl->events++;
        percpu_counter_add(&p->events, 1);
}
/* Return fraction of events of type pl */
void fprop_fraction_single(struct fprop_global *p,
                           struct fprop_local_single *pl,
                           unsigned long *numerator, unsigned long *denominator)
{
        unsigned int seq;
        s64 num, den;

        do {
                seq = read_seqcount_begin(&p->sequence);
                fprop_reflect_period_single(p, pl);
                num = pl->events;
                den = percpu_counter_read_positive(&p->events);
        } while (read_seqcount_retry(&p->sequence, seq));

        /*
         * Make fraction <= 1 and denominator > 0 even in presence of percpu
         * counter errors
         */
        if (den <= num) {
                if (num)
                        den = num;
                else
                        den = 1;
        }
        *denominator = den;
        *numerator = num;
}
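/*
 * Usage sketch (hypothetical names): counting one event type with the
 * SINGLE flavour and reading back its share of all events.  The fraction
 * helpers guarantee den > 0 and num <= den, so the division is safe.
 */
#if 0
static struct fprop_global all_events;
static struct fprop_local_single read_events;

static void count_read(void)
{
        __fprop_inc_single(&all_events, &read_events);
}

static unsigned long read_share_percent(void)
{
        unsigned long num, den;

        fprop_fraction_single(&all_events, &read_events, &num, &den);
        return num * 100 / den;
}
#endif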
/*
 * ---- PERCPU ----
 */

/*
 * Batch size for the percpu event counters; a bigger batch means cheaper
 * increments but a larger maximum read error (up to nr_cpu_ids * PROP_BATCH,
 * which fprop_reflect_period_percpu() below compensates for).
 */
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
        int err;

        err = percpu_counter_init(&pl->events, 0, gfp);
        if (err)
                return err;
        pl->period = 0;
        raw_spin_lock_init(&pl->lock);
        return 0;
}
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
        percpu_counter_destroy(&pl->events);
}
static void fprop_reflect_period_percpu(struct fprop_global *p,
                                        struct fprop_local_percpu *pl)
{
        unsigned int period = p->period;
        unsigned long flags;

        /* Fast path - period didn't change */
        if (pl->period == period)
                return;
        raw_spin_lock_irqsave(&pl->lock, flags);
        /* Someone updated pl->period while we were spinning? */
        if (pl->period >= period) {
                raw_spin_unlock_irqrestore(&pl->lock, flags);
                return;
        }
        /* Aging zeroed our fraction? */
        if (period - pl->period < BITS_PER_LONG) {
                s64 val = percpu_counter_read(&pl->events);

                /*
                 * The approximate counter value suffices unless the possible
                 * percpu error (nr_cpu_ids * PROP_BATCH) could dominate it;
                 * only then pay for an exact sum.
                 */
                if (val < (nr_cpu_ids * PROP_BATCH))
                        val = percpu_counter_sum(&pl->events);

                percpu_counter_add_batch(&pl->events,
                        -val + (val >> (period-pl->period)), PROP_BATCH);
        } else
                percpu_counter_set(&pl->events, 0);
        pl->period = period;
        raw_spin_unlock_irqrestore(&pl->lock, flags);
}
/* Event of type pl happened */
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
{
        fprop_reflect_period_percpu(p, pl);
        percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
        percpu_counter_add(&p->events, 1);
}
/* Return fraction of events of type pl */
void fprop_fraction_percpu(struct fprop_global *p,
                           struct fprop_local_percpu *pl,
                           unsigned long *numerator, unsigned long *denominator)
{
        unsigned int seq;
        s64 num, den;

        do {
                seq = read_seqcount_begin(&p->sequence);
                fprop_reflect_period_percpu(p, pl);
                num = percpu_counter_read_positive(&pl->events);
                den = percpu_counter_read_positive(&p->events);
        } while (read_seqcount_retry(&p->sequence, seq));

        /*
         * Make fraction <= 1 and denominator > 0 even in presence of percpu
         * counter errors
         */
        if (den <= num) {
                if (num)
                        den = num;
                else
                        den = 1;
        }
        *denominator = den;
        *numerator = num;
}
/*
 * Like __fprop_inc_percpu() except that the event is counted only if the
 * given type has a fraction smaller than @max_frac/FPROP_FRAC_BASE
 */
void __fprop_inc_percpu_max(struct fprop_global *p,
                            struct fprop_local_percpu *pl, int max_frac)
{
        if (unlikely(max_frac < FPROP_FRAC_BASE)) {
                unsigned long numerator, denominator;

                fprop_fraction_percpu(p, pl, &numerator, &denominator);
                if (numerator >
                    (((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
                        return;
        }

        __fprop_inc_percpu(p, pl);
}
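/*
 * Usage sketch (hypothetical names): capping one event type at roughly a
 * quarter of the total.  FPROP_FRAC_BASE is the fixed-point "1.0", so
 * @max_frac is simply a fraction of it; the writeback code uses the same
 * call to bound a single device's share of completions.
 */
#if 0
static struct fprop_global all_events;
static struct fprop_local_percpu dev_events;

static void count_dev_event(void)
{
        /* Not counted once dev_events' fraction reaches 1/4 */
        __fprop_inc_percpu_max(&all_events, &dev_events, FPROP_FRAC_BASE / 4);
}
#endif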