lib/percpu_counter.c
/*
 * Fast batching percpu counters.
 */
#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);
#endif
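/*
 * Illustrative usage sketch (not part of the original file): a caller pairs
 * percpu_counter_init() with percpu_counter_destroy() and uses the
 * percpu_counter_add()/percpu_counter_read() wrappers from
 * <linux/percpu_counter.h> in between. The names example_counter and
 * example_usage are hypothetical.
 */
#if 0	/* sketch only, not compiled */
static struct percpu_counter example_counter;

static int __init example_usage(void)
{
        int err = percpu_counter_init(&example_counter, 0);

        if (err)
                return err;

        percpu_counter_add(&example_counter, 1);        /* batched fast path */
        printk(KERN_INFO "approx count: %lld\n",
               (long long)percpu_counter_read(&example_counter));
        percpu_counter_destroy(&example_counter);
        return 0;
}
#endif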
/* Zero every per-cpu delta and set the central count under the lock. */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        int cpu;

        spin_lock(&fbc->lock);
        for_each_possible_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                *pcount = 0;
        }
        fbc->count = amount;
        spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        s64 count;
        s32 *pcount;
        int cpu = get_cpu();

        pcount = per_cpu_ptr(fbc->counters, cpu);
        count = *pcount + amount;
        if (count >= batch || count <= -batch) {
                /* Local delta hit the batch threshold: fold it into the
                 * central count under the lock. */
                spin_lock(&fbc->lock);
                fbc->count += count;
                *pcount = 0;
                spin_unlock(&fbc->lock);
        } else {
                /* Fast path: update this CPU's local counter, lockless. */
                *pcount = count;
        }
        put_cpu();
}
EXPORT_SYMBOL(__percpu_counter_add);
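/*
 * Illustrative sketch (hypothetical name and batch value): with a batch of
 * 64, up to 63 lockless +1 increments accumulate in this CPU's local s32
 * before the next one folds the whole delta into fbc->count under fbc->lock.
 */
#if 0	/* sketch only, not compiled */
static struct percpu_counter hits;

static void record_hit(void)
{
        __percpu_counter_add(&hits, 1, 64);
}
#endif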
/*
 * Add up all the per-cpu counts, return the result. This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
{
        s64 ret;
        int cpu;

        spin_lock(&fbc->lock);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
                if (set)
                        *pcount = 0;
        }
        if (set)
                fbc->count = ret;

        spin_unlock(&fbc->lock);
        return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
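/*
 * Illustrative sketch of the accuracy/cost tradeoff (hypothetical names):
 * percpu_counter_read() from <linux/percpu_counter.h> just returns
 * fbc->count, so it is lockless but can be stale by up to roughly
 * num_online_cpus() * batch; __percpu_counter_sum() is exact but takes
 * fbc->lock and walks every online CPU.
 */
#if 0	/* sketch only, not compiled */
static s64 cheap_estimate(struct percpu_counter *fbc)
{
        return percpu_counter_read(fbc);        /* approximate, lockless */
}

static s64 exact_value(struct percpu_counter *fbc)
{
        return __percpu_counter_sum(fbc, 0);    /* exact, takes fbc->lock */
}
#endif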
static struct lock_class_key percpu_counter_irqsafe;
int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
        spin_lock_init(&fbc->lock);
        fbc->count = amount;
        fbc->counters = alloc_percpu(s32);
        if (!fbc->counters)
                return -ENOMEM;
#ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&percpu_counters_lock);
        list_add(&fbc->list, &percpu_counters);
        mutex_unlock(&percpu_counters_lock);
#endif
        return 0;
}
EXPORT_SYMBOL(percpu_counter_init);
int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount)
{
        int err;

        err = percpu_counter_init(fbc, amount);
        if (!err)
                lockdep_set_class(&fbc->lock, &percpu_counter_irqsafe);
        return err;
}
EXPORT_SYMBOL(percpu_counter_init_irq);
void percpu_counter_destroy(struct percpu_counter *fbc)
{
        if (!fbc->counters)
                return;

        free_percpu(fbc->counters);
        fbc->counters = NULL;
#ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&percpu_counters_lock);
        list_del(&fbc->list);
        mutex_unlock(&percpu_counters_lock);
#endif
}
EXPORT_SYMBOL(percpu_counter_destroy);
#ifdef CONFIG_HOTPLUG_CPU
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
                                        unsigned long action, void *hcpu)
{
        unsigned int cpu;
        struct percpu_counter *fbc;

        if (action != CPU_DEAD)
                return NOTIFY_OK;

        cpu = (unsigned long)hcpu;
        mutex_lock(&percpu_counters_lock);
        list_for_each_entry(fbc, &percpu_counters, list) {
                s32 *pcount;
                unsigned long flags;

                /* Fold the dead CPU's residual delta into the central
                 * count so it is not lost. */
                spin_lock_irqsave(&fbc->lock, flags);
                pcount = per_cpu_ptr(fbc->counters, cpu);
                fbc->count += *pcount;
                *pcount = 0;
                spin_unlock_irqrestore(&fbc->lock, flags);
        }
        mutex_unlock(&percpu_counters_lock);
        return NOTIFY_OK;
}
static int __init percpu_counter_startup(void)
{
        hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
        return 0;
}
module_init(percpu_counter_startup);
#endif