include/asm-x86/local_32.h (pv_ops_mirror.git)
#ifndef _ARCH_I386_LOCAL_H
#define _ARCH_I386_LOCAL_H

#include <linux/percpu.h>
#include <asm/system.h>
#include <asm/atomic.h>

typedef struct
{
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l, i)	atomic_long_set(&(l)->a, (i))
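
/*
 * Illustrative usage, not part of the original header (the variable name
 * "hits" is hypothetical): a local_t is meant to be updated only by its
 * owning CPU, which is why the operations below can skip the LOCK prefix.
 *
 *	static local_t hits = LOCAL_INIT(0);
 *
 *	local_set(&hits, 0);
 *	local_inc(&hits);
 *	printk("hits: %ld\n", local_read(&hits));
 */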

static __inline__ void local_inc(local_t *l)
{
	__asm__ __volatile__(
		"incl %0"
		:"+m" (l->a.counter));
}

static __inline__ void local_dec(local_t *l)
{
	__asm__ __volatile__(
		"decl %0"
		:"+m" (l->a.counter));
}

static __inline__ void local_add(long i, local_t *l)
{
	__asm__ __volatile__(
		"addl %1,%0"
		:"+m" (l->a.counter)
		:"ir" (i));
}

static __inline__ void local_sub(long i, local_t *l)
{
	__asm__ __volatile__(
		"subl %1,%0"
		:"+m" (l->a.counter)
		:"ir" (i));
}

/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer of type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int local_sub_and_test(long i, local_t *l)
{
	unsigned char c;

	__asm__ __volatile__(
		"subl %2,%0; sete %1"
		:"+m" (l->a.counter), "=qm" (c)
		:"ir" (i) : "memory");
	return c;
}

/**
 * local_dec_and_test - decrement and test
 * @l: pointer of type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int local_dec_and_test(local_t *l)
{
	unsigned char c;

	__asm__ __volatile__(
		"decl %0; sete %1"
		:"+m" (l->a.counter), "=qm" (c)
		: : "memory");
	return c != 0;
}

/**
 * local_inc_and_test - increment and test
 * @l: pointer of type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int local_inc_and_test(local_t *l)
{
	unsigned char c;

	__asm__ __volatile__(
		"incl %0; sete %1"
		:"+m" (l->a.counter), "=qm" (c)
		: : "memory");
	return c != 0;
}

/**
 * local_add_negative - add and test if negative
 * @l: pointer of type local_t
 * @i: integer value to add
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int local_add_negative(long i, local_t *l)
{
	unsigned char c;

	__asm__ __volatile__(
		"addl %2,%0; sets %1"
		:"+m" (l->a.counter), "=qm" (c)
		:"ir" (i) : "memory");
	return c;
}

/**
 * local_add_return - add and return
 * @l: pointer of type local_t
 * @i: integer value to add
 *
 * Atomically adds @i to @l and returns @i + @l
 */
static __inline__ long local_add_return(long i, local_t *l)
{
	long __i;
#ifdef CONFIG_M386
	unsigned long flags;
	if (unlikely(boot_cpu_data.x86 <= 3))
		goto no_xadd;
#endif
	/* Modern 486+ processor */
	__i = i;
	__asm__ __volatile__(
		"xaddl %0, %1;"
		:"+r" (i), "+m" (l->a.counter)
		: : "memory");
	return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
	local_irq_save(flags);
	__i = local_read(l);
	local_set(l, i + __i);
	local_irq_restore(flags);
	return i + __i;
#endif
}
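
/*
 * Illustrative only, not in the original header (the variable name "seq"
 * is hypothetical): local_add_return() yields the value after the
 * addition, so it can hand out CPU-local sequence numbers.
 *
 *	static local_t seq = LOCAL_INIT(0);
 *
 *	long id = local_add_return(1, &seq);	same as local_inc_return(&seq)
 */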

static __inline__ long local_sub_return(long i, local_t *l)
{
	return local_add_return(-i, l);
}

#define local_inc_return(l)	(local_add_return(1, l))
#define local_dec_return(l)	(local_sub_return(1, l))

#define local_cmpxchg(l, o, n) \
	(cmpxchg_local(&((l)->a.counter), (o), (n)))
/* Always has a lock prefix */
#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))

/**
 * local_add_unless - add unless the number is a given value
 * @l: pointer of type local_t
 * @a: the amount to add to l...
 * @u: ...unless l is equal to u.
 *
 * Atomically adds @a to @l, so long as it was not @u.
 * Returns non-zero if @l was not @u, and zero otherwise.
 */
#define local_add_unless(l, a, u)				\
({								\
	long c, old;						\
	c = local_read(l);					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = local_cmpxchg((l), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
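
/*
 * Illustrative only, not in the original header ("obj" and its "refs"
 * field are hypothetical): local_inc_not_zero() gives the usual
 * "take a reference unless the count already dropped to zero" pattern,
 * built here on the local_cmpxchg() retry loop above.
 *
 *	if (!local_inc_not_zero(&obj->refs))
 *		return 0;	object is already being torn down
 */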

/* On x86, these are no better than the atomic variants. */
#define __local_inc(l)		local_inc(l)
#define __local_dec(l)		local_dec(l)
#define __local_add(i, l)	local_add((i), (l))
#define __local_sub(i, l)	local_sub((i), (l))

/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable, not an address.
 */

/* Need to disable preemption for the cpu local counters, otherwise we
   could still access a variable of a previous CPU in a non-atomic way. */
#define cpu_local_wrap_v(l)		\
	({ local_t res__;		\
	   preempt_disable();		\
	   res__ = (l);			\
	   preempt_enable();		\
	   res__; })
#define cpu_local_wrap(l)		\
	({ preempt_disable();		\
	   l;				\
	   preempt_enable(); })

#define cpu_local_read(l)	cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
#define cpu_local_set(l, i)	cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
#define cpu_local_inc(l)	cpu_local_wrap(local_inc(&__get_cpu_var(l)))
#define cpu_local_dec(l)	cpu_local_wrap(local_dec(&__get_cpu_var(l)))
#define cpu_local_add(i, l)	cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
#define cpu_local_sub(i, l)	cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))

#define __cpu_local_inc(l)	cpu_local_inc(l)
#define __cpu_local_dec(l)	cpu_local_dec(l)
#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
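
/*
 * Illustrative only, not in the original header ("nr_events" is a
 * hypothetical per-cpu variable): the cpu_local_* helpers take the
 * per-cpu variable itself, not its address, and wrap the access in
 * preempt_disable()/preempt_enable() as noted above.
 *
 *	static DEFINE_PER_CPU(local_t, nr_events) = LOCAL_INIT(0);
 *
 *	cpu_local_inc(nr_events);
 *	cpu_local_add(16, nr_events);
 */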

#endif /* _ARCH_I386_LOCAL_H */