/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
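
/*
 * Where that figure comes from (illustrative arithmetic, assuming the
 * per-CPU slots are padded out to a cacheline each to avoid false
 * sharing): 32 CPUs * 128-byte P4 cachelines = 4096 bytes per counter.
 */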

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})
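
/*
 * Minimal usage sketch (illustrative only; the counter name is
 * hypothetical and the caller is assumed to be in GFP_KERNEL context):
 *
 *	struct percpu_counter nr_items;
 *
 *	if (percpu_counter_init(&nr_items, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	percpu_counter_inc(&nr_items);
 *	...
 *	pr_info("approx %lld\n", percpu_counter_read(&nr_items));
 *	percpu_counter_destroy(&nr_items);
 */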

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
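
/*
 * Sketch of a typical caller (hypothetical names): the comparison is
 * made against the approximate count first and only falls back to an
 * exact sum when the result is too close to call, so it stays cheap.
 *
 *	if (percpu_counter_compare(&counter, limit) >= 0)
 *		return -ENOSPC;	/* at or over the limit */
 */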

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
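
/*
 * Batching in a nutshell (the real implementation lives in
 * lib/percpu_counter.c): each CPU accumulates deltas in its local s32
 * and only folds them into fbc->count under the lock once the local
 * value reaches +/-batch, so percpu_counter_read() can be off by up to
 * about batch * num_online_cpus().
 */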

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}
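
/*
 * Rule of thumb: percpu_counter_sum() takes the lock and walks every
 * CPU's local delta, so it is accurate but expensive; prefer
 * percpu_counter_read() on hot paths that can tolerate the batching
 * error described above.
 */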

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for the percpu_counter_read() to return a small negative
 * number for some counter which should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 0;
}
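
/*
 * Worked example of that transient negative (hypothetical numbers, with
 * batch == 32): CPU0 holds a local delta of +30 that has not been
 * folded into fbc->count yet, while CPU1 has already folded in a -1.
 * A reader then sees fbc->count == -1 although the logical value is
 * +29, which is why the result is clamped to zero here.
 */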

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}
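
/*
 * Note that the batch argument is deliberately ignored here: on a
 * uniprocessor build there are no per-CPU deltas to defer, so every
 * update goes straight into fbc->count.
 */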

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}

#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */