1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_PERCPU_COUNTER_H
3 #define _LINUX_PERCPU_COUNTER_H
5 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
7 * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
10 #include <linux/spinlock.h>
11 #include <linux/smp.h>
12 #include <linux/list.h>
13 #include <linux/threads.h>
14 #include <linux/percpu.h>
15 #include <linux/types.h>
17 /* percpu_counter batch for local add or sub */
18 #define PERCPU_COUNTER_LOCAL_BATCH INT_MAX
22 struct percpu_counter
{
25 #ifdef CONFIG_HOTPLUG_CPU
26 struct list_head list
; /* All percpu_counters are on a list */
28 s32 __percpu
*counters
;
31 extern int percpu_counter_batch
;
33 int __percpu_counter_init_many(struct percpu_counter
*fbc
, s64 amount
,
34 gfp_t gfp
, u32 nr_counters
,
35 struct lock_class_key
*key
);
37 #define percpu_counter_init_many(fbc, value, gfp, nr_counters) \
39 static struct lock_class_key __key; \
41 __percpu_counter_init_many(fbc, value, gfp, nr_counters,\
46 #define percpu_counter_init(fbc, value, gfp) \
47 percpu_counter_init_many(fbc, value, gfp, 1)
49 void percpu_counter_destroy_many(struct percpu_counter
*fbc
, u32 nr_counters
);
/* Tear down a single counter: the nr_counters == 1 special case. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
	percpu_counter_destroy_many(fbc, 1);
}
55 void percpu_counter_set(struct percpu_counter
*fbc
, s64 amount
);
56 void percpu_counter_add_batch(struct percpu_counter
*fbc
, s64 amount
,
58 s64
__percpu_counter_sum(struct percpu_counter
*fbc
);
59 int __percpu_counter_compare(struct percpu_counter
*fbc
, s64 rhs
, s32 batch
);
60 bool __percpu_counter_limited_add(struct percpu_counter
*fbc
, s64 limit
,
61 s64 amount
, s32 batch
);
62 void percpu_counter_sync(struct percpu_counter
*fbc
);
64 static inline int percpu_counter_compare(struct percpu_counter
*fbc
, s64 rhs
)
66 return __percpu_counter_compare(fbc
, rhs
, percpu_counter_batch
);
69 static inline void percpu_counter_add(struct percpu_counter
*fbc
, s64 amount
)
71 percpu_counter_add_batch(fbc
, amount
, percpu_counter_batch
);
75 percpu_counter_limited_add(struct percpu_counter
*fbc
, s64 limit
, s64 amount
)
77 return __percpu_counter_limited_add(fbc
, limit
, amount
,
78 percpu_counter_batch
);
82 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
83 * are accumulated in local per cpu counter and not in fbc->count until
84 * local count overflows PERCPU_COUNTER_LOCAL_BATCH. This makes counter
86 * But percpu_counter_sum(), instead of percpu_counter_read(), needs to be
87 * used to add up the counts from each CPU to account for all the local
88 * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
89 * should be used when a counter is updated frequently and read rarely.
92 percpu_counter_add_local(struct percpu_counter
*fbc
, s64 amount
)
94 percpu_counter_add_batch(fbc
, amount
, PERCPU_COUNTER_LOCAL_BATCH
);
97 static inline s64
percpu_counter_sum_positive(struct percpu_counter
*fbc
)
99 s64 ret
= __percpu_counter_sum(fbc
);
100 return ret
< 0 ? 0 : ret
;
103 static inline s64
percpu_counter_sum(struct percpu_counter
*fbc
)
105 return __percpu_counter_sum(fbc
);
108 static inline s64
percpu_counter_read(struct percpu_counter
*fbc
)
114 * It is possible for the percpu_counter_read() to return a small negative
115 * number for some counter which should never be negative.
118 static inline s64
percpu_counter_read_positive(struct percpu_counter
*fbc
)
120 /* Prevent reloads of fbc->count */
121 s64 ret
= READ_ONCE(fbc
->count
);
128 static inline bool percpu_counter_initialized(struct percpu_counter
*fbc
)
130 return (fbc
->counters
!= NULL
);
133 #else /* !CONFIG_SMP */
135 struct percpu_counter
{
139 static inline int percpu_counter_init_many(struct percpu_counter
*fbc
,
140 s64 amount
, gfp_t gfp
,
145 for (i
= 0; i
< nr_counters
; i
++)
146 fbc
[i
].count
= amount
;
151 static inline int percpu_counter_init(struct percpu_counter
*fbc
, s64 amount
,
154 return percpu_counter_init_many(fbc
, amount
, gfp
, 1);
157 static inline void percpu_counter_destroy_many(struct percpu_counter
*fbc
,
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
166 static inline void percpu_counter_set(struct percpu_counter
*fbc
, s64 amount
)
171 static inline int percpu_counter_compare(struct percpu_counter
*fbc
, s64 rhs
)
173 if (fbc
->count
> rhs
)
175 else if (fbc
->count
< rhs
)
182 __percpu_counter_compare(struct percpu_counter
*fbc
, s64 rhs
, s32 batch
)
184 return percpu_counter_compare(fbc
, rhs
);
188 percpu_counter_add(struct percpu_counter
*fbc
, s64 amount
)
192 local_irq_save(flags
);
193 fbc
->count
+= amount
;
194 local_irq_restore(flags
);
198 percpu_counter_limited_add(struct percpu_counter
*fbc
, s64 limit
, s64 amount
)
207 local_irq_save(flags
);
208 count
= fbc
->count
+ amount
;
209 if ((amount
> 0 && count
<= limit
) ||
210 (amount
< 0 && count
>= limit
)) {
214 local_irq_restore(flags
);
218 /* non-SMP percpu_counter_add_local is the same with percpu_counter_add */
220 percpu_counter_add_local(struct percpu_counter
*fbc
, s64 amount
)
222 percpu_counter_add(fbc
, amount
);
226 percpu_counter_add_batch(struct percpu_counter
*fbc
, s64 amount
, s32 batch
)
228 percpu_counter_add(fbc
, amount
);
231 static inline s64
percpu_counter_read(struct percpu_counter
*fbc
)
237 * percpu_counter is intended to track positive numbers. In the UP case the
238 * number should never be negative.
240 static inline s64
percpu_counter_read_positive(struct percpu_counter
*fbc
)
245 static inline s64
percpu_counter_sum_positive(struct percpu_counter
*fbc
)
247 return percpu_counter_read_positive(fbc
);
250 static inline s64
percpu_counter_sum(struct percpu_counter
*fbc
)
252 return percpu_counter_read(fbc
);
255 static inline bool percpu_counter_initialized(struct percpu_counter
*fbc
)
260 static inline void percpu_counter_sync(struct percpu_counter
*fbc
)
263 #endif /* CONFIG_SMP */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
275 static inline void percpu_counter_sub(struct percpu_counter
*fbc
, s64 amount
)
277 percpu_counter_add(fbc
, -amount
);
281 percpu_counter_sub_local(struct percpu_counter
*fbc
, s64 amount
)
283 percpu_counter_add_local(fbc
, -amount
);
286 #endif /* _LINUX_PERCPU_COUNTER_H */