// SPDX-License-Identifier: GPL-2.0
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif
#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static const struct debug_obj_descr percpu_counter_debug_descr;

static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return true;
	default:
		return false;
	}
}
static const struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}
#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);
/*
 * This function is both preempt and irq safe. The former is due to explicit
 * preemption disable. The latter is guaranteed by the fact that the slow path
 * is explicitly protected by an irq-safe spinlock, whereas the fast path uses
 * this_cpu_add(), which is irq-safe by definition. Hence there is no need to
 * muck with irq state before calling this one.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (abs(count) >= batch) {
		unsigned long flags;

		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(percpu_counter_add_batch);
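
/*
 * Illustrative sketch, not part of the original file: because of the
 * guarantees described above, a hypothetical caller may update a counter
 * from process, softirq or hardirq context without saving irq state
 * itself. The names nr_events and count_event() are made up for
 * illustration only.
 *
 *	static struct percpu_counter nr_events;
 *
 *	static void count_event(void)
 *	{
 *		percpu_counter_add_batch(&nr_events, 1, percpu_counter_batch);
 *	}
 */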
/*
 * For a percpu_counter with a big batch, the deviation of its count could
 * be big, and there is a requirement to reduce the deviation, like when the
 * counter's batch could be runtime decreased to get better accuracy,
 * which can be achieved by running this sync function on each CPU.
 */
void percpu_counter_sync(struct percpu_counter *fbc)
{
	unsigned long flags;
	s64 count;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	count = __this_cpu_read(*fbc->counters);
	fbc->count += count;
	__this_cpu_sub(*fbc->counters, count);
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_sync);
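
/*
 * Illustrative sketch, not part of the original file: since
 * percpu_counter_sync() only folds the *current* CPU's delta into
 * fbc->count, a hypothetical caller that tightens a counter's batch at
 * runtime could run it on every CPU, e.g. via on_each_cpu(). sync_one()
 * and nr_events are made-up names.
 *
 *	static void sync_one(void *fbc)
 *	{
 *		percpu_counter_sync(fbc);
 *	}
 *
 *	on_each_cpu(sync_one, &nr_events, 1);
 */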
/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
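
/*
 * Illustrative sketch, not part of the original file: the two readers
 * side by side. percpu_counter_read() is lockless and may be off by up
 * to roughly batch * num_online_cpus(); percpu_counter_sum() takes the
 * lock and walks every online CPU. nr_events is a made-up name.
 *
 *	s64 approx = percpu_counter_read(&nr_events);
 *	s64 exact  = percpu_counter_sum(&nr_events);
 */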
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
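
/*
 * Illustrative sketch, not part of the original file: the typical
 * counter lifecycle. percpu_counter_init() is the header wrapper that
 * supplies the lock_class_key for __percpu_counter_init() above;
 * nr_events is a made-up name.
 *
 *	static struct percpu_counter nr_events;
 *
 *	int err = percpu_counter_init(&nr_events, 0, GFP_KERNEL);
 *	if (err)
 *		return err;
 *	...
 *	percpu_counter_destroy(&nr_events);
 */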
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);
static int compute_batch_value(unsigned int cpu)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
	return 0;
}
static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}
/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less.
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	s64	count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);
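
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * limit check that only pays for the precise sum when the approximate
 * count is within batch * num_online_cpus() of the threshold.
 * percpu_counter_compare() is the header wrapper passing
 * percpu_counter_batch; nr_events and limit are made-up names.
 *
 *	if (percpu_counter_compare(&nr_events, limit) >= 0)
 *		return -ENOSPC;
 */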
static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
module_init(percpu_counter_startup);