lib/percpu_counter.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif
#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static const struct debug_obj_descr percpu_counter_debug_descr;

static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);
/*
 * Add to a counter while respecting batch size.
 *
 * There are 2 implementations, both dealing with the following problem:
 *
 * The decision slow path/fast path and the actual update must be atomic.
 * Otherwise a call in process context could check the current values and
 * decide that the fast path can be used. If now an interrupt occurs before
 * the this_cpu_add(), and the interrupt updates this_cpu(*fbc->counters),
 * then the this_cpu_add() that is executed after the interrupt has completed
 * can produce values larger than "batch" or even overflows.
 */
#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * Safety against interrupts is achieved in 2 ways:
 * 1. the fast path uses local cmpxchg (note: no lock prefix)
 * 2. the slow path operates with interrupts disabled
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;
	unsigned long flags;

	count = this_cpu_read(*fbc->counters);
	do {
		if (unlikely(abs(count + amount) >= batch)) {
			raw_spin_lock_irqsave(&fbc->lock, flags);
			/*
			 * Note: by now we might have migrated to another CPU
			 * or the value might have changed.
			 */
			count = __this_cpu_read(*fbc->counters);
			fbc->count += count + amount;
			__this_cpu_sub(*fbc->counters, count);
			raw_spin_unlock_irqrestore(&fbc->lock, flags);
			return;
		}
	} while (!this_cpu_try_cmpxchg(*fbc->counters, &count, count + amount));
}
#else
/*
 * local_irq_save() is used to make the function irq safe:
 * - The slow path would be ok as protected by an irq-safe spinlock.
 * - this_cpu_add would be ok as it is irq-safe by definition.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;
	unsigned long flags;

	local_irq_save(flags);
	count = __this_cpu_read(*fbc->counters) + amount;
	if (abs(count) >= batch) {
		raw_spin_lock(&fbc->lock);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock(&fbc->lock);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	local_irq_restore(flags);
}
#endif
EXPORT_SYMBOL(percpu_counter_add_batch);
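
/*
 * Illustrative sketch, not part of the original file: most callers use the
 * percpu_counter_add() wrapper from <linux/percpu_counter.h>, which passes
 * the global percpu_counter_batch down to percpu_counter_add_batch().  The
 * counter and variable names below are hypothetical.
 *
 *	static struct percpu_counter nr_widgets;
 *
 *	percpu_counter_add(&nr_widgets, 1);			// hot-path update
 *	percpu_counter_add_batch(&nr_widgets, delta, 256);	// caller-chosen batch
 */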
/*
 * For a percpu_counter with a big batch, the deviation of its count could
 * be big, and there is a requirement to reduce the deviation, like when the
 * counter's batch could be runtime decreased to get a better accuracy,
 * which can be achieved by running this sync function on each CPU.
 */
void percpu_counter_sync(struct percpu_counter *fbc)
{
	unsigned long flags;
	s64 count;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	count = __this_cpu_read(*fbc->counters);
	fbc->count += count;
	__this_cpu_sub(*fbc->counters, count);
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_sync);
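
/*
 * Illustrative sketch, not part of the original file: since
 * percpu_counter_sync() only folds the local CPU's partial count, a caller
 * that shrinks a counter's batch at runtime might run it on every CPU, for
 * example via on_each_cpu().  The wrapper and counter below are hypothetical.
 *
 *	static void my_counter_sync(void *info)
 *	{
 *		percpu_counter_sync(info);
 *	}
 *
 *	on_each_cpu(my_counter_sync, &my_counter, 1);
 */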
/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 *
 * We use the cpu mask of (cpu_online_mask | cpu_dying_mask) to capture sums
 * from CPUs that are in the process of being taken offline. Dying cpus have
 * been removed from the online mask, but may not have had the hotplug dead
 * notifier called to fold the percpu count back into the global counter sum.
 * By including dying CPUs in the iteration mask, we avoid this race condition
 * so __percpu_counter_sum() just does the right thing when CPUs are being taken
 * offline.
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
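
/*
 * Rough accuracy note (added commentary, not from the original file): after
 * percpu_counter_add_batch(), each CPU's local counter stays below "batch"
 * in magnitude, so the cheap percpu_counter_read() can deviate from the true
 * total by up to about batch * num_online_cpus(), e.g. a few hundred with
 * 16 CPUs and the default batch of 32.  __percpu_counter_sum() folds those
 * per-cpu residues in under fbc->lock, which is why __percpu_counter_compare()
 * below only trusts the cheap read outside that error bound.
 */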
int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
			       gfp_t gfp, u32 nr_counters,
			       struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;
	size_t counter_size;
	s32 __percpu *counters;
	u32 i;

	counter_size = ALIGN(sizeof(*counters), __alignof__(*counters));
	counters = __alloc_percpu_gfp(nr_counters * counter_size,
				      __alignof__(*counters), gfp);
	if (!counters) {
		fbc[0].counters = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_counters; i++) {
		raw_spin_lock_init(&fbc[i].lock);
		lockdep_set_class(&fbc[i].lock, key);
#ifdef CONFIG_HOTPLUG_CPU
		INIT_LIST_HEAD(&fbc[i].list);
#endif
		fbc[i].count = amount;
		fbc[i].counters = (void __percpu *)counters + i * counter_size;

		debug_percpu_counter_activate(&fbc[i]);
	}

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	for (i = 0; i < nr_counters; i++)
		list_add(&fbc[i].list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init_many);
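
/*
 * Illustrative sketch, not part of the original file: callers normally go
 * through the percpu_counter_init() / percpu_counter_init_many() wrappers in
 * <linux/percpu_counter.h>, which supply the lock_class_key.  The array and
 * error handling below are hypothetical.
 *
 *	static struct percpu_counter ev_counters[2];
 *
 *	int err = percpu_counter_init_many(ev_counters, 0, GFP_KERNEL, 2);
 *	if (err)
 *		return err;
 *	...
 *	percpu_counter_destroy_many(ev_counters, 2);
 */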
void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters)
{
	unsigned long flags __maybe_unused;
	u32 i;

	if (WARN_ON_ONCE(!fbc))
		return;

	if (!fbc[0].counters)
		return;

	for (i = 0; i < nr_counters; i++)
		debug_percpu_counter_deactivate(&fbc[i]);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	for (i = 0; i < nr_counters; i++)
		list_del(&fbc[i].list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif

	free_percpu(fbc[0].counters);

	for (i = 0; i < nr_counters; i++)
		fbc[i].counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy_many);
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static int compute_batch_value(unsigned int cpu)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
	return 0;
}
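
/*
 * Worked example (added commentary, not from the original file): with 8
 * online CPUs the batch stays at the floor of 32; with 64 online CPUs it
 * becomes max(32, 64 * 2) = 128, letting each CPU buffer a larger local
 * delta before it has to take fbc->lock.
 */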
static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}
/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	s64 count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);
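
/*
 * Illustrative sketch, not part of the original file: a filesystem-style
 * free-space check could use the percpu_counter_compare() wrapper, which
 * calls __percpu_counter_compare() with the global batch.  The counter and
 * field names are hypothetical.
 *
 *	if (percpu_counter_compare(&sbi->free_blocks, nr_wanted) < 0)
 *		return -ENOSPC;
 */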
/*
 * Compare counter, and add amount if total is: less than or equal to limit if
 * amount is positive, or greater than or equal to limit if amount is negative.
 * Return true if amount is added, or false if total would be beyond the limit.
 *
 * Negative limit is allowed, but unusual.
 * When negative amounts (subs) are given to percpu_counter_limited_add(),
 * the limit would most naturally be 0 - but other limits are also allowed.
 *
 * Overflow beyond S64_MAX is not allowed for: counter, limit and amount
 * are all assumed to be sane (far from S64_MIN and S64_MAX).
 */
bool __percpu_counter_limited_add(struct percpu_counter *fbc,
				  s64 limit, s64 amount, s32 batch)
{
	s64 count;
	s64 unknown;
	unsigned long flags;
	bool good = false;

	if (amount == 0)
		return true;

	local_irq_save(flags);
	unknown = batch * num_online_cpus();
	count = __this_cpu_read(*fbc->counters);

	/* Skip taking the lock when safe */
	if (abs(count + amount) <= batch &&
	    ((amount > 0 && fbc->count + unknown <= limit) ||
	     (amount < 0 && fbc->count - unknown >= limit))) {
		this_cpu_add(*fbc->counters, amount);
		local_irq_restore(flags);
		return true;
	}

	raw_spin_lock(&fbc->lock);
	count = fbc->count + amount;

	/* Skip percpu_counter_sum() when safe */
	if (amount > 0) {
		if (count - unknown > limit)
			goto out;
		if (count + unknown <= limit)
			good = true;
	} else {
		if (count + unknown < limit)
			goto out;
		if (count - unknown >= limit)
			good = true;
	}

	if (!good) {
		s32 *pcount;
		int cpu;

		for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
			pcount = per_cpu_ptr(fbc->counters, cpu);
			count += *pcount;
		}
		if (amount > 0) {
			if (count > limit)
				goto out;
		} else {
			if (count < limit)
				goto out;
		}
		good = true;
	}

	count = __this_cpu_read(*fbc->counters);
	fbc->count += count + amount;
	__this_cpu_sub(*fbc->counters, count);
out:
	raw_spin_unlock(&fbc->lock);
	local_irq_restore(flags);
	return good;
}
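
/*
 * Illustrative sketch, not part of the original file: the header provides a
 * percpu_counter_limited_add() wrapper that passes the global batch.  A
 * caller enforcing a hypothetical quota might do:
 *
 *	if (!percpu_counter_limited_add(&quota_used, quota_max, 1))
 *		return -ENOSPC;
 *
 *	// release later with a negative amount against a lower limit of 0
 *	percpu_counter_limited_add(&quota_used, 0, -1);
 */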
static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
module_init(percpu_counter_startup);