/* arch/alpha/include/asm/bitops.h */
#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/compiler.h>
#include <asm/barrier.h>
/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */
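
/*
 * Rough C sketch of the ldl_l/stl_c pattern used throughout this file.
 * This is illustrative only; load_locked() and store_conditional() are
 * hypothetical helpers standing in for the hardware primitives:
 *
 *	do {
 *		old = load_locked(m);			// ldl_l
 *		new = old | mask;			// bis, bic, or xor
 *	} while (!store_conditional(m, new));		// stl_c fails if the
 *							// word changed; retry
 */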
static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bis %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
/*
 * WARNING: non atomic version.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
static inline void
clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	clear_bit(nr, addr);
}
/*
 * WARNING: non atomic version.
 */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m &= ~(1 << (nr & 31));
}
static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}
static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
/*
 * WARNING: non atomic version.
 */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}
static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
static inline int
test_and_set_bit_lock(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
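
/*
 * Note: on SMP, test_and_set_bit() above issues an mb both before and
 * after the update (full barrier semantics), whereas
 * test_and_set_bit_lock() omits the leading mb; the trailing barrier
 * alone is sufficient for acquiring a lock.
 */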
/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}
static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}
static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
/*
 * WARNING: non atomic version.
 */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}
static inline int
test_bit(int nr, const volatile void * addr)
{
	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}
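
/*
 * For example, if the first word of the bitmap is 0x5 (bits 0 and 2
 * set), then test_bit(0, addr) and test_bit(2, addr) return nonzero
 * while test_bit(1, addr) returns 0.
 */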
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 *
 * Do a binary search on the bits. Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum, x1, x2, x4;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	x1 = x & 0xAA;
	x2 = x & 0xCC;
	x4 = x & 0xF0;
	sum = x2 ? 2 : 0;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}
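
/*
 * Worked example: for x = 0xfb (binary 11111011) the first zero is
 * bit 2.  ~x & -~x isolates that bit, giving 0x04; then x2 = 0x04
 * contributes 2 while x4 and x1 are zero, so ffz_b(0xfb) == 2.  The
 * masks work because each bit position's binary index determines
 * which of the 0xAA/0xCC/0xF0 groups it falls in.
 */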
static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(word, ~0UL);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}
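
/*
 * How the generic path works: cmpbge compares word against ~0UL byte
 * by byte and sets one result bit per all-ones byte, so ffz_b on that
 * mask locates the first byte containing a zero; extbl extracts that
 * byte and a second ffz_b finds the zero bit within it.  For example,
 * with word = 0xffffffffffffff0fUL the byte mask is 0xfe, qofs == 0,
 * the extracted byte is 0x0f, and ffz() returns 4.
 */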
/*
 * __ffs = Find First set bit in word.  Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(0, word);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);

	return qofs*8 + bofs;
#endif
}
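
/*
 * Here cmpbge(0, word) flags the all-zero bytes instead, so ffz_b
 * locates the first nonzero byte and ffz_b(~bits) finds its lowest
 * set bit.  For example, __ffs(0x30) == 4.
 */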
#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */

static inline int ffs(int word)
{
	int result = __ffs(word) + 1;
	return word ? result : 0;
}
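
/*
 * Unlike __ffs(), ffs() uses 1-based bit numbering and defines the
 * zero case: ffs(0) == 0, ffs(1) == 1, ffs(0x8000) == 16.
 */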
/*
 * fls: find last bit set.
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls64(unsigned long word)
{
	return 64 - __kernel_ctlz(word);
}
#else
extern const unsigned char __flsm1_tab[256];

static inline int fls64(unsigned long x)
{
	unsigned long t, a, r;

	t = __kernel_cmpbge (x, 0x0101010101010101UL);
	a = __flsm1_tab[t];
	t = __kernel_extbl (x, a);
	r = a*8 + __flsm1_tab[t] + (x != 0);

	return r;
}
#endif
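
/*
 * In the table-based version, cmpbge against 0x0101010101010101UL
 * yields one result bit per nonzero byte; __flsm1_tab maps that mask
 * to the index of the highest nonzero byte, extbl pulls that byte
 * out, and a second table lookup finds its highest set bit.  The
 * final (x != 0) converts to 1-based numbering and makes
 * fls64(0) == 0.  For example, fls64(0x100) == 9.
 */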
static inline unsigned long __fls(unsigned long x)
{
	return fls64(x) - 1;
}

static inline int fls(int x)
{
	return fls64((unsigned int) x);
}
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee.  EV67 can calculate it directly.  */
static inline unsigned long hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

static inline unsigned int hweight32(unsigned int w)
{
	return hweight64(w);
}

static inline unsigned int hweight16(unsigned int w)
{
	return hweight64(w & 0xffff);
}

static inline unsigned int hweight8(unsigned int w)
{
	return hweight64(w & 0xff);
}
#else
#include <asm-generic/bitops/hweight.h>
#endif
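
/*
 * E.g. hweight8(0xa5) == 4, since 0xa5 is 10100101 in binary.  The
 * narrower variants simply mask the argument down before using the
 * 64-bit ctpop instruction.
 */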
#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline unsigned long
sched_find_first_bit(unsigned long b[3])
{
	unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
	unsigned long ofs;

	ofs = (b1 ? 64 : 128);
	b1 = (b1 ? b1 : b2);
	ofs = (b0 ? 0 : ofs);
	b0 = (b0 ? b0 : b1);

	return __ffs(b0) + ofs;
}
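
/*
 * The conditional selects pick the first nonzero word and its base
 * offset without branching.  For example, if b[0] == 0, b[1] == 0 and
 * b[2] has bit 5 as its lowest set bit, the result is 128 + 5 == 133.
 */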
#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(l,n,a)	test_and_set_bit(n,a)
#define ext2_clear_bit_atomic(l,n,a)	test_and_clear_bit(n,a)

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */