/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* percpu_counter batch for local add or sub */
#define PERCPU_COUNTER_LOCAL_BATCH	INT_MAX

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
			       gfp_t gfp, u32 nr_counters,
			       struct lock_class_key *key);

#define percpu_counter_init_many(fbc, value, gfp, nr_counters)		\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init_many(fbc, value, gfp, nr_counters,\
					   &__key);			\
	})

#define percpu_counter_init(fbc, value, gfp)				\
	percpu_counter_init_many(fbc, value, gfp, 1)

void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
	percpu_counter_destroy_many(fbc, 1);
}
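
/*
 * Example (illustrative sketch, not part of this header's API): the usual
 * life cycle is init -> add/sub -> sum/read -> destroy.  The names
 * foo_stats, foo_stats_setup and foo_stats_teardown below are made up for
 * illustration; only the percpu_counter calls are real.
 *
 *	struct foo_stats {
 *		struct percpu_counter nr_items;
 *	};
 *
 *	static int foo_stats_setup(struct foo_stats *st)
 *	{
 *		// Start at 0; GFP_KERNEL means this may sleep.
 *		return percpu_counter_init(&st->nr_items, 0, GFP_KERNEL);
 *	}
 *
 *	static void foo_stats_teardown(struct foo_stats *st)
 *	{
 *		percpu_counter_destroy(&st->nr_items);
 *	}
 */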

void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit,
				  s64 amount, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}

static inline bool
percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
{
	return __percpu_counter_limited_add(fbc, limit, amount,
					    percpu_counter_batch);
}
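
/*
 * Example (illustrative sketch): enforcing a cap with the counter.  The
 * name foo_try_charge and the limit of 10000 are invented for this
 * example.  percpu_counter_limited_add() applies the delta only when the
 * result stays within the limit, while percpu_counter_compare() answers
 * "above or below rhs" cheaply and falls back to a precise sum only when
 * the approximate count is within the batch fuzz of rhs.
 *
 *	static bool foo_try_charge(struct percpu_counter *used, s64 nr)
 *	{
 *		// Charge nr only if the total stays at or below the cap.
 *		return percpu_counter_limited_add(used, 10000, nr);
 *	}
 */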

/*
 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
 * are accumulated in the local per-CPU counter rather than in fbc->count
 * until the local count overflows PERCPU_COUNTER_LOCAL_BATCH.  This makes
 * counter writes efficient.
 * But percpu_counter_sum(), instead of percpu_counter_read(), must then be
 * used to add up the counts from each CPU to account for all the local
 * counts.  So percpu_counter_add_local() and percpu_counter_sub_local()
 * should be used when a counter is updated frequently and read rarely.
 */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
}
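
/*
 * Example (illustrative sketch): a write-mostly counter.  The names
 * nr_dirty, foo_account and foo_report are made up; the point is that hot
 * updates go through percpu_counter_add_local()/percpu_counter_sub_local()
 * and the rare reader must use percpu_counter_sum(), since fbc->count
 * alone may lag by up to PERCPU_COUNTER_LOCAL_BATCH per CPU.
 *
 *	static void foo_account(struct percpu_counter *nr_dirty, s64 nr)
 *	{
 *		percpu_counter_add_local(nr_dirty, nr);	// hot path
 *	}
 *
 *	static s64 foo_report(struct percpu_counter *nr_dirty)
 *	{
 *		return percpu_counter_sum(nr_dirty);	// slow, but exact
 *	}
 */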

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	/* Prevent reloads of fbc->count */
	s64 ret = READ_ONCE(fbc->count);

	if (ret >= 0)
		return ret;
	return 0;
}
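
/*
 * Example (illustrative sketch): choosing a read helper.  The name
 * foo_nr_free is invented.  percpu_counter_read*() is a single load of the
 * shared count and may be off by roughly batch * num_online_cpus(), while
 * percpu_counter_sum*() folds in every CPU's counter under the lock and is
 * exact but expensive.
 *
 *	// Fast, approximate: fine for heuristics and statistics.
 *	s64 approx = percpu_counter_read_positive(&foo_nr_free);
 *
 *	// Slow, exact: use when the answer actually matters.
 *	s64 exact = percpu_counter_sum_positive(&foo_nr_free);
 */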

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init_many(struct percpu_counter *fbc,
					   s64 amount, gfp_t gfp,
					   u32 nr_counters)
{
	u32 i;

	for (i = 0; i < nr_counters; i++)
		fbc[i].count = amount;

	return 0;
}

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	return percpu_counter_init_many(fbc, amount, gfp, 1);
}

static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
					       u32 nr_counters)
{
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	unsigned long flags;

	local_irq_save(flags);
	fbc->count += amount;
	local_irq_restore(flags);
}

static inline bool
percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
{
	unsigned long flags;
	bool good = false;
	s64 count;

	if (amount == 0)
		return true;

	local_irq_save(flags);
	count = fbc->count + amount;
	if ((amount > 0 && count <= limit) ||
	    (amount < 0 && count >= limit)) {
		fbc->count = count;
		good = true;
	}
	local_irq_restore(flags);
	return good;
}

/* non-SMP percpu_counter_add_local is the same as percpu_counter_add */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, amount);
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}

static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}

#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

static inline void
percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_local(fbc, -amount);
}
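
/*
 * Example (illustrative sketch): the inc/dec/sub wrappers are just
 * percpu_counter_add() with fixed or negated amounts, so they pair
 * naturally for tracking in-flight objects.  foo_nr_inflight is a made-up
 * counter name.
 *
 *	percpu_counter_inc(&foo_nr_inflight);	// object submitted
 *	...
 *	percpu_counter_dec(&foo_nr_inflight);	// object completed
 */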

#endif /* _LINUX_PERCPU_COUNTER_H */