#ifndef _ARCH_POWERPC_LOCAL_H
#define _ARCH_POWERPC_LOCAL_H

#include <linux/percpu.h>
#include <asm/atomic.h>

typedef struct
{
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l,i)	atomic_long_set(&(l)->a, (i))

#define local_add(i,l)	atomic_long_add((i),(&(l)->a))
#define local_sub(i,l)	atomic_long_sub((i),(&(l)->a))
#define local_inc(l)	atomic_long_inc(&(l)->a)
#define local_dec(l)	atomic_long_dec(&(l)->a)

static __inline__ long local_add_return(long a, local_t *l)
{
	long t;

	__asm__ __volatile__(
"1:"	PPC_LLARX "%0,0,%2		# local_add_return\n\
	add	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
	PPC_STLCX	"%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t)
	: "r" (a), "r" (&(l->a.counter))
	: "cc", "memory");

	return t;
}

#define local_add_negative(a, l)	(local_add_return((a), (l)) < 0)

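/*
 * Usage sketch (illustrative only; "depth" is a hypothetical variable,
 * not part of this interface).  local_add_return() hands back the new
 * value of the counter, so local_add_negative() is just a sign test on
 * that result:
 *
 *	local_t depth = LOCAL_INIT(0);
 *
 *	long now = local_add_return(5, &depth);		now == 5
 *	if (local_add_negative(-10, &depth))
 *		...					depth went below zero
 */
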
static __inline__ long local_sub_return(long a, local_t *l)
{
	long t;

	__asm__ __volatile__(
"1:"	PPC_LLARX "%0,0,%2		# local_sub_return\n\
	subf	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
	PPC_STLCX	"%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t)
	: "r" (a), "r" (&(l->a.counter))
	: "cc", "memory");

	return t;
}

static __inline__ long local_inc_return(local_t *l)
{
	long t;

	__asm__ __volatile__(
"1:"	PPC_LLARX "%0,0,%1		# local_inc_return\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
	PPC_STLCX	"%0,0,%1 \n\
	bne-	1b"
	: "=&r" (t)
	: "r" (&(l->a.counter))
	: "cc", "memory");

	return t;
}

/**
 * local_inc_and_test - increment and test
 * @l: pointer of type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define local_inc_and_test(l) (local_inc_return(l) == 0)

static __inline__ long local_dec_return(local_t *l)
{
	long t;

	__asm__ __volatile__(
"1:"	PPC_LLARX "%0,0,%1		# local_dec_return\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
	PPC_STLCX	"%0,0,%1\n\
	bne-	1b"
	: "=&r" (t)
	: "r" (&(l->a.counter))
	: "cc", "memory");

	return t;
}

#define local_cmpxchg(l, o, n) \
	(cmpxchg_local(&((l)->a.counter), (o), (n)))
#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))

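/*
 * Sketch of the cmpxchg/xchg wrappers (illustrative only; "flag" is a
 * hypothetical variable).  Both hand back the previous value of the
 * counter, so a successful compare-and-exchange is detected by comparing
 * the return value against the expected old value:
 *
 *	local_t flag = LOCAL_INIT(0);
 *
 *	if (local_cmpxchg(&flag, 0, 1) == 0)
 *		...			flag was still 0 and is now 1
 *	old = local_xchg(&flag, 0);	unconditional reset, old value kept
 */
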
/**
 * local_add_unless - add unless the number is a given value
 * @l: pointer of type local_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @l, so long as it was not @u.
 * Returns non-zero if @l was not @u, and zero otherwise.
 */
static __inline__ int local_add_unless(local_t *l, long a, long u)
{
	long t;

	__asm__ __volatile__ (
"1:"	PPC_LLARX "%0,0,%1		# local_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
	PPC_STLCX	"%0,0,%1 \n\
	bne-	1b \n"
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&(l->a.counter)), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

#define local_inc_not_zero(l) local_add_unless((l), 1, 0)

#define local_sub_and_test(a, l)	(local_sub_return((a), (l)) == 0)
#define local_dec_and_test(l)		(local_dec_return((l)) == 0)

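/*
 * Typical reference-count style use of the *_and_test helpers
 * (illustrative only; "refs" and release_object() are hypothetical,
 * not part of this interface):
 *
 *	local_t refs = LOCAL_INIT(1);
 *
 *	if (!local_inc_not_zero(&refs))
 *		...				object already dying, leave it alone
 *	if (local_dec_and_test(&refs))
 *		release_object();		we dropped the last reference
 */
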
/*
 * Atomically test *l and decrement if it is greater than 0.
 * The function returns the old value of *l minus 1.
 */
static __inline__ long local_dec_if_positive(local_t *l)
{
	long t;

	__asm__ __volatile__(
"1:"	PPC_LLARX "%0,0,%1		# local_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
	PPC_STLCX	"%0,0,%1\n\
	bne-	1b"
	"\n\
2:"	: "=&b" (t)
	: "r" (&(l->a.counter))
	: "cc", "memory");

	return t;
}

/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable, not an address.
 */

#define __local_inc(l)		((l)->a.counter++)
#define __local_dec(l)		((l)->a.counter--)
#define __local_add(i,l)	((l)->a.counter+=(i))
#define __local_sub(i,l)	((l)->a.counter-=(i))

/* Need to disable preemption for the cpu local counters otherwise we could
   still access a variable of a previous CPU in a non atomic way. */
#define cpu_local_wrap_v(l)		\
	({ local_t res__;		\
	   preempt_disable();		\
	   res__ = (l);			\
	   preempt_enable();		\
	   res__; })
#define cpu_local_wrap(l)		\
	({ preempt_disable();		\
	   l;				\
	   preempt_enable(); })

#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var(l)))
#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var(l)))
#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))

#define __cpu_local_inc(l)	cpu_local_inc(l)
#define __cpu_local_dec(l)	cpu_local_dec(l)
#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))

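/*
 * Per-cpu usage sketch (illustrative only; "nr_events" is a hypothetical
 * per-cpu variable).  The cpu_local_* wrappers take the variable itself,
 * not its address, and supply the preempt_disable()/preempt_enable()
 * pair described above:
 *
 *	DEFINE_PER_CPU(local_t, nr_events) = LOCAL_INIT(0);
 *
 *	cpu_local_inc(nr_events);
 *	printk("%ld events\n", cpu_local_read(nr_events));
 */
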
#endif /* _ARCH_POWERPC_LOCAL_H */