/*
 * lib/percpu_counter.c
 *
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
        struct percpu_counter *fbc = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                percpu_counter_destroy(fbc);
                debug_object_free(fbc, &percpu_counter_debug_descr);
                return 1;
        default:
                return 0;
        }
}

static struct debug_obj_descr percpu_counter_debug_descr = {
        .name           = "percpu_counter",
        .fixup_free     = percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
        debug_object_init(fbc, &percpu_counter_debug_descr);
        debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
        debug_object_deactivate(fbc, &percpu_counter_debug_descr);
        debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else   /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif  /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        int cpu;

        raw_spin_lock(&fbc->lock);
        for_each_possible_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                *pcount = 0;
        }
        fbc->count = amount;
        raw_spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);
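
/*
 * Illustrative usage sketch (not code from this file): percpu_counter_set()
 * is how a live counter gets reseeded, e.g. after recomputing a total from
 * scratch, since it zeroes every CPU's local delta before storing the new
 * global value ("nr_widgets" and "recomputed_total" are hypothetical):
 *
 *      percpu_counter_set(&nr_widgets, recomputed_total);
 *
 * Updaters running concurrently with the set are not excluded for the whole
 * operation, so the result is only approximate if adds race with it.
 */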

void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        s64 count;

        preempt_disable();
        count = __this_cpu_read(*fbc->counters) + amount;
        if (count >= batch || count <= -batch) {
                raw_spin_lock(&fbc->lock);
                fbc->count += count;
                __this_cpu_write(*fbc->counters, 0);
                raw_spin_unlock(&fbc->lock);
        } else {
                __this_cpu_write(*fbc->counters, count);
        }
        preempt_enable();
}
EXPORT_SYMBOL(__percpu_counter_add);
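
/*
 * Illustrative usage sketch (assumes the percpu_counter_inc()/
 * percpu_counter_add() wrappers from <linux/percpu_counter.h>, which pass
 * the global percpu_counter_batch as the batch argument; "nr_widgets" is a
 * hypothetical counter):
 *
 *      percpu_counter_inc(&nr_widgets);
 *      percpu_counter_add(&nr_widgets, 16);
 *
 * Each call only touches this CPU's s32 delta; the delta is folded into
 * fbc->count under fbc->lock once its magnitude reaches the batch.
 */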

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
        s64 ret;
        int cpu;

        raw_spin_lock(&fbc->lock);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
        }
        raw_spin_unlock(&fbc->lock);
        return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
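
/*
 * Illustrative read-vs-sum sketch (assumes the percpu_counter_read() and
 * percpu_counter_sum() helpers from <linux/percpu_counter.h>; "nr_widgets"
 * is a hypothetical counter):
 *
 *      s64 approx = percpu_counter_read(&nr_widgets);
 *      s64 exact = percpu_counter_sum(&nr_widgets);
 *
 * The read is lockless and may deviate from the precise value by roughly
 * batch * num_online_cpus(); the sum takes fbc->lock and walks every online
 * CPU's delta.  Fast paths should prefer the cheap read and fall back to the
 * precise sum only near a limit, as percpu_counter_compare() does below.
 */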

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                          struct lock_class_key *key)
{
        raw_spin_lock_init(&fbc->lock);
        lockdep_set_class(&fbc->lock, key);
        fbc->count = amount;
        fbc->counters = alloc_percpu(s32);
        if (!fbc->counters)
                return -ENOMEM;

        debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
        INIT_LIST_HEAD(&fbc->list);
        spin_lock(&percpu_counters_lock);
        list_add(&fbc->list, &percpu_counters);
        spin_unlock(&percpu_counters_lock);
#endif
        return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

void percpu_counter_destroy(struct percpu_counter *fbc)
{
        if (!fbc->counters)
                return;

        debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
        spin_lock(&percpu_counters_lock);
        list_del(&fbc->list);
        spin_unlock(&percpu_counters_lock);
#endif
        free_percpu(fbc->counters);
        fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
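
/*
 * Illustrative lifecycle sketch (assumes the percpu_counter_init() wrapper
 * from <linux/percpu_counter.h>, which supplies the lock_class_key for
 * __percpu_counter_init(); "nr_widgets", widgets_setup() and
 * widgets_teardown() are hypothetical):
 *
 *      static struct percpu_counter nr_widgets;
 *
 *      int widgets_setup(void)
 *      {
 *              int err = percpu_counter_init(&nr_widgets, 0);
 *
 *              if (err)
 *                      return err;
 *              return 0;
 *      }
 *
 *      void widgets_teardown(void)
 *      {
 *              percpu_counter_destroy(&nr_widgets);
 *      }
 */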

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static void compute_batch_value(void)
{
        int nr = num_online_cpus();

        percpu_counter_batch = max(32, nr*2);
}
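
/*
 * Worked example of the batch sizing above, following directly from
 * max(32, nr*2): 8 online CPUs keep the floor of 32, while 64 online CPUs
 * yield a batch of 128.  Each CPU can hold up to (batch - 1) uncommitted in
 * its local s32, so percpu_counter_read() may deviate from the precise sum
 * by roughly batch * num_online_cpus(), e.g. 32 * 8 = 256 on the small
 * machine and 128 * 64 = 8192 on the large one.
 */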

/*
 * Recompute the batch for the new number of online CPUs and, when a CPU
 * goes away, fold its per-cpu delta into the global count of every
 * registered counter.
 */
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
                                        unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
        unsigned int cpu;
        struct percpu_counter *fbc;

        compute_batch_value();
        if (action != CPU_DEAD)
                return NOTIFY_OK;

        cpu = (unsigned long)hcpu;
        spin_lock(&percpu_counters_lock);
        list_for_each_entry(fbc, &percpu_counters, list) {
                s32 *pcount;
                unsigned long flags;

                raw_spin_lock_irqsave(&fbc->lock, flags);
                pcount = per_cpu_ptr(fbc->counters, cpu);
                fbc->count += *pcount;
                *pcount = 0;
                raw_spin_unlock_irqrestore(&fbc->lock, flags);
        }
        spin_unlock(&percpu_counters_lock);
#endif
        return NOTIFY_OK;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less.
 */
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        s64 count;

        count = percpu_counter_read(fbc);
        /* Check to see if rough count will be sufficient for comparison */
        if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
                if (count > rhs)
                        return 1;
                else
                        return -1;
        }
        /* Need to use precise count */
        count = percpu_counter_sum(fbc);
        if (count > rhs)
                return 1;
        else if (count < rhs)
                return -1;
        else
                return 0;
}
EXPORT_SYMBOL(percpu_counter_compare);
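
/*
 * Illustrative use of the comparison helper (a hypothetical quota-style
 * check, not code from this file): the expensive percpu_counter_sum() is
 * only paid for when the approximate count is within
 * batch * num_online_cpus() of the limit.
 *
 *      if (percpu_counter_compare(&nr_widgets, widget_limit) >= 0)
 *              return -ENOSPC;
 *      percpu_counter_inc(&nr_widgets);
 */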

static int __init percpu_counter_startup(void)
{
        compute_batch_value();
        hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
        return 0;
}
module_init(percpu_counter_startup);