#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/compiler.h>
#include <asm/barrier.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */

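/*
 * Illustrative sketch of that numbering (just the arithmetic the helpers
 * below use): bit nr lives in 32-bit word nr >> 5, at bit nr & 31 of it.
 * For example, set_bit(40, map) on "unsigned long map[2]" ends up doing
 * the equivalent of ((int *) map)[1] |= 1 << 8, since 40 >> 5 == 1 and
 * 40 & 31 == 8.
 */
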
static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bis %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}

static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

static inline void
clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	clear_bit(nr, addr);
}

/*
 * WARNING: non atomic version.
 */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m &= ~(1 << (nr & 31));
}

static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}

static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}

static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

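/*
 * Hypothetical caller-side sketch: because the read-modify-write above
 * is atomic, exactly one caller sees the bit transition from clear to
 * set, e.g.
 *
 *	if (!test_and_set_bit(0, &some_flags))
 *		do_one_time_setup();
 *
 * where some_flags and do_one_time_setup() are placeholders.
 */
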
static inline int
test_and_set_bit_lock(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}

static inline int
test_bit(int nr, const volatile void * addr)
{
	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 *
 * Do a binary search on the bits.  Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum, x1, x2, x4;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	x1 = x & 0xAA;
	x2 = x & 0xCC;
	x4 = x & 0xF0;
	sum = x2 ? 2 : 0;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}

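/*
 * Worked example of ffz_b(): for x = 0x07 the isolated zero bit is 0x08,
 * giving x1 = 0x08, x2 = 0x08, x4 = 0, so sum = 2 + 0 + 1 = 3; bit 3 is
 * indeed the first zero bit of 0x07.
 */
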
static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(word, ~0UL);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}

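/*
 * Worked example of the non-EV67 path: for word = 0xff,
 * __kernel_cmpbge(word, ~0UL) is 0x01 (only byte 0 is all-ones), so
 * qofs = ffz_b(0x01) = 1, the extracted byte is 0x00, bofs = 0, and
 * ffz(0xff) = 1*8 + 0 = 8.
 */
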
/*
 * __ffs = Find First set bit in word.  Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(0, word);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);

	return qofs*8 + bofs;
#endif
}

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */
static inline int ffs(int word)
{
	int result = __ffs(word) + 1;
	return word ? result : 0;
}

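/*
 * Example of the two conventions: __ffs(0x10) is 4 (a bit index), while
 * ffs(0x10) is 5 and ffs(0) is 0, matching the libc behaviour.
 */
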
/*
 * fls: find last bit set.
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls64(unsigned long word)
{
	return 64 - __kernel_ctlz(word);
}
#else
extern const unsigned char __flsm1_tab[256];

static inline int fls64(unsigned long x)
{
	unsigned long t, a, r;

	t = __kernel_cmpbge (x, 0x0101010101010101UL);
	a = __flsm1_tab[t];
	t = __kernel_extbl (x, a);
	r = a*8 + __flsm1_tab[t] + (x != 0);

	return r;
}
#endif

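/*
 * Worked example of the table variant (assuming __flsm1_tab[n] holds
 * fls(n) - 1 for each byte value): for x = 0x300UL only byte 1 is
 * nonzero, so a = 1, the extracted byte is 0x03, and
 * r = 1*8 + 1 + 1 = 10, i.e. the last set bit is bit 9.
 */
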
static inline unsigned long __fls(unsigned long x)
{
	return fls64(x) - 1;
}

static inline int fls(int x)
{
	return fls64((unsigned int) x);
}

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

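/*
 * Example: __arch_hweight8(0xb1) is 4, since 0xb1 = 10110001 has four
 * bits set.
 */
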
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee.  EV67 can calculate it directly.  */
static inline unsigned long __arch_hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

static inline unsigned int __arch_hweight32(unsigned int w)
{
	return __arch_hweight64(w);
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
	return __arch_hweight64(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
	return __arch_hweight64(w & 0xff);
}
#else
#include <asm-generic/bitops/arch_hweight.h>
#endif

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 100-bit bitmap.  It's guaranteed that at least
 * one of the 100 bits is set.
 */
static inline unsigned long
sched_find_first_bit(const unsigned long b[2])
{
	unsigned long b0, b1, ofs, tmp;

	b0 = b[0];
	b1 = b[1];
	ofs = (b0 ? 0 : 64);
	tmp = (b0 ? b0 : b1);

	return __ffs(tmp) + ofs;
}

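/*
 * Example: for b[0] = 0 and b[1] = 0x8, b0 is zero, so ofs = 64 and
 * tmp = b[1]; the result is __ffs(0x8) + 64 = 3 + 64 = 67.
 */
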
#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */