#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/compiler.h>
#include <asm/barrier.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 * (An illustrative usage sketch follows test_bit() below.)
 */

static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bis %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}

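/*
 * Illustrative sketch, not part of this header's API: the "WARNING:
 * non atomic version" above means __set_bit() may only be used while
 * no other CPU or interrupt can touch the bitmap.  The function and
 * bitmap names here are hypothetical.
 */
static inline void example_init_mask(unsigned long *mask)
{
	__set_bit(0, mask);	/* safe only before the bitmap is shared */
	set_bit(1, mask);	/* atomic: safe under concurrent access */
}
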
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

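/*
 * Illustrative sketch (hypothetical name): the macros above expand to
 * full memory barriers, so a clear_bit() can be ordered against the
 * accesses around it, e.g. when clearing a "busy" flag must not be
 * reordered before the writes it protects.
 */
static inline void example_release_busy(unsigned long *flags)
{
	smp_mb__before_clear_bit();	/* order prior writes before the clear */
	clear_bit(0, flags);		/* drop the hypothetical busy bit */
}
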
static inline void
clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	clear_bit(nr, addr);
}

/*
 * WARNING: non atomic version.
 */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m &= ~(1 << (nr & 31));
}

static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}

static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}

static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

static inline int
test_and_set_bit_lock(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:	mb\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}

static inline int
test_bit(int nr, const volatile void * addr)
{
	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}

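/*
 * Illustrative sketch (hypothetical name) of the bit numbering
 * described at the top of this file: bit nr lives in 32-bit word
 * (nr >> 5) at position (nr & 31), so bit 64 is the LSB of the third
 * int, i.e. the LSB of (addr+1) for an unsigned long pointer.
 */
static inline int example_bit_numbering(unsigned long *map)
{
	set_bit(0, map);		/* LSB of map[0] */
	set_bit(64, map);		/* LSB of map[1] */
	return test_bit(64, map);	/* reads word 2, bit 0: returns 1 */
}
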
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 *
 * Do a binary search on the bits. Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum, x1, x2, x4;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	x1 = x & 0xAA;
	x2 = x & 0xCC;
	x4 = x & 0xF0;

	sum = x2 ? 2 : 0;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}

static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(word, ~0UL);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}

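/*
 * Illustrative sketch (hypothetical name): per the comment above,
 * ffz() is undefined on ~0UL, so callers must test for that first.
 */
static inline unsigned long example_first_free(unsigned long busy)
{
	if (busy == ~0UL)
		return 64;		/* no zero bit; 64 here means "none" */
	return ffz(busy);		/* index of the lowest clear bit */
}
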
/*
 * __ffs = Find First set bit in word.  Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(0, word);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);

	return qofs*8 + bofs;
#endif
}

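/*
 * Illustrative note (hypothetical name): __ffs() and ffz() are duals;
 * the first set bit of x is the first zero bit of ~x.
 */
static inline unsigned long example_ffs_via_ffz(unsigned long x)
{
	return ffz(~x);			/* equals __ffs(x) for x != 0 */
}
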
#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */

static inline int ffs(int word)
{
	int result = __ffs(word) + 1;
	return word ? result : 0;
}

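/*
 * Worked examples of the distinction: ffs() is 1-based and defined
 * for 0, while __ffs() is 0-based and undefined for 0.
 *
 *	ffs(0)    == 0		__ffs(0)    undefined
 *	ffs(1)    == 1		__ffs(1)    == 0
 *	ffs(0x40) == 7		__ffs(0x40) == 6
 */
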
/*
 * fls: find last bit set.
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls64(unsigned long word)
{
	return 64 - __kernel_ctlz(word);
}
#else
extern const unsigned char __flsm1_tab[256];

static inline int fls64(unsigned long x)
{
	unsigned long t, a, r;

	t = __kernel_cmpbge (x, 0x0101010101010101UL);
	a = __flsm1_tab[t];
	t = __kernel_extbl (x, a);
	r = a*8 + __flsm1_tab[t] + (x != 0);

	return r;
}
#endif

static inline unsigned long __fls(unsigned long x)
{
	return fls64(x) - 1;
}

static inline int fls(int x)
{
	return fls64((unsigned int) x);
}

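/*
 * Worked examples: fls() is 1-based like ffs(), counting from the
 * most significant set bit, and __fls() is its 0-based counterpart
 * (undefined for 0).
 *
 *	fls(0)          == 0
 *	fls(1)          == 1
 *	fls(0x80000000) == 32
 */
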
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee.  EV67 can calculate it directly.  */
static inline unsigned long __arch_hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

static inline unsigned int __arch_hweight32(unsigned int w)
{
	return __arch_hweight64(w);
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
	return __arch_hweight64(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
	return __arch_hweight64(w & 0xff);
}
#else
#include <asm-generic/bitops/arch_hweight.h>
#endif

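/*
 * Worked example: the hamming weight is a population count, e.g.
 * __arch_hweight8(0xb3) == 5, since 0xb3 == 10110011 has five set bits.
 */
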
#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 100-bit bitmap.  It's guaranteed that at least
 * one of the 100 bits is cleared.
 */
static inline unsigned long
sched_find_first_bit(const unsigned long b[2])
{
	unsigned long b0, b1, ofs, tmp;

	b0 = b[0];
	b1 = b[1];
	ofs = (b0 ? 0 : 64);
	tmp = (b0 ? b0 : b1);

	return __ffs(tmp) + ofs;
}

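/*
 * Worked example (hypothetical values): with b[0] == 0 and
 * b[1] == 0x10, the first set bit is bit 4 of the second word, so
 * sched_find_first_bit(b) returns 64 + 4 == 68.
 */
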
#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */