/* arch/ia64/include/asm/bitops.h */
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
 * O(1) scheduler patch
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are drivers (e.g., eepro100) which use these operations to
 * operate on hw-defined data structures, so we can't easily change these
 * operations to force a bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
}
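/*
 * Illustrative sketch (not part of the original header): how a caller might
 * exercise set_bit() on a "long"-aligned bitmap.  The function and bitmap
 * names are hypothetical and exist only to demonstrate the bit numbering
 * described above: bit 0 is the LSB of the first word, bit 32 the LSB of
 * the next 32-bit word.
 */
static __inline__ void
example_set_bits (void)
{
	unsigned long bitmap[2] = { 0, 0 };	/* 128 bits of "long"-aligned storage */

	set_bit(0, bitmap);	/* LSB of the first 32-bit word */
	set_bit(32, bitmap);	/* LSB of the second 32-bit word */
}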
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
}
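/*
 * Illustrative sketch (not part of the original header): pairing clear_bit()
 * with an explicit barrier when the cleared bit publishes earlier stores, as
 * the comment above suggests.  The flag word and bit number are hypothetical.
 */
static __inline__ void
example_clear_flag_release (volatile unsigned long *flags)
{
	smp_mb__before_atomic();	/* order prior stores before the clear */
	clear_bit(0, flags);
}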
/**
 * clear_bit_unlock - Clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and may not be reordered.  It does
 * contain a memory barrier suitable for unlock type operations.
 */
static __inline__ void
clear_bit_unlock (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_rel(m, old, new) != old);
}
/**
 * __clear_bit_unlock - Non-atomically clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Similarly to clear_bit_unlock, the implementation uses a store
 * with release semantics.  See also arch_spin_unlock().
 */
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
{
	__u32 * const m = (__u32 *) addr + (nr >> 5);
	__u32 const new = *m & ~(1 << (nr & 31));

	ia64_st4_rel_nta(m, new);
}
/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
}
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}
/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on ia64
 */
#define test_and_set_bit_lock test_and_set_bit
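/*
 * Illustrative sketch (not part of the original header): a minimal
 * test-and-set style lock built from the primitives above.  The lock word
 * and bit number are hypothetical; real code would use the regular spinlock
 * API instead of open-coding this.
 */
static __inline__ void
example_bit_lock (volatile unsigned long *word)
{
	while (test_and_set_bit_lock(0, word))
		;	/* spin until the previous owner clears bit 0 */
}

static __inline__ void
example_bit_unlock (volatile unsigned long *word)
{
	clear_bit_unlock(0, word);	/* store with release semantics */
}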
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p |= m;
	return oldbitset;
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & ~mask) != 0;
}
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p &= ~m;
	return oldbitset;
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}
/**
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 */
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
	__u32 old, bit = (1 << (nr & 31));
	__u32 *m = (__u32 *) addr + (nr >> 5);

	old = *m;
	*m = old ^ bit;
	return (old & bit) != 0;
}
static __inline__ int
test_bit (int nr, const volatile void *addr)
{
	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}
/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit-number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt(x & (~x - 1));
	return result;
}
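/*
 * Illustrative sketch (not part of the original header): using ffz() to pick
 * a free slot in a small allocation map, checking against ~0UL first as the
 * comment above requires.  The map layout and return convention are
 * hypothetical.
 */
static __inline__ long
example_find_free_slot (unsigned long map)
{
	if (map == ~0UL)	/* all 64 slots taken: no zero bit to find */
		return -1;
	return ffz(map);	/* e.g., ffz(0x7) == 3 */
}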
/**
 * __ffs - find first bit in word.
 * @x: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt((x-1) & ~x);
	return result;
}
#ifdef __KERNEL__

/*
 * Return bit number of last (most-significant) bit set.  Undefined
 * for x==0.  Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
	long double d = x;
	long exp;

	exp = ia64_getf_exp(d);
	return exp - 0xffff;
}
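/*
 * Illustrative note (not part of the original header): converting x to long
 * double places it in an ia64 register-format float whose exponent field is
 * biased by 0xffff, so getf.exp minus the bias yields floor(log2(x)), i.e.
 * the index of the most-significant set bit.  For example, 9 = 1.001b * 2^3,
 * so the biased exponent is 0xffff + 3 and ia64_fls(9) == 3.
 */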
/*
 * Find the last (most significant) bit set.  Returns 0 for x==0 and
 * bits are numbered from 1..32 (e.g., fls(9) == 4).
 */
static inline int
fls (int t)
{
	unsigned long x = t & 0xffffffffu;

	if (!x)
		return 0;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return ia64_popcnt(x);
}
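/*
 * Illustrative note (not part of the original header): the OR-shift cascade
 * above smears the most-significant set bit into every lower position, so
 * the popcount of the result is the 1-based index of that bit.  For example,
 * 9 is 1001b; after the shifts the value is 1111b, and ia64_popcnt(1111b)
 * == 4 == fls(9).
 */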
/*
 * Find the last (most significant) bit set.  Undefined for x==0.
 * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
 */
static inline unsigned long
__fls (unsigned long x)
{
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;
	return ia64_popcnt(x) - 1;
}
#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/builtin-ffs.h>

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
static __inline__ unsigned long __arch_hweight64(unsigned long x)
{
	unsigned long result;
	result = ia64_popcnt(x);
	return result;
}

#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
#define __arch_hweight8(x)  ((unsigned int) __arch_hweight64((x) & 0xfful))
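/*
 * Illustrative sketch (not part of the original header): counting set bits
 * with the popcnt-backed helpers above.  The values are arbitrary examples;
 * generic kernel code normally reaches these through the hweight*() wrappers.
 */
static __inline__ unsigned int
example_hamming_weights (void)
{
	/* 0xf0f0f0f0f0f0f0f0 has 32 set bits, 0xffff has 16 */
	return __arch_hweight64(0xf0f0f0f0f0f0f0f0UL) + __arch_hweight16(0xffff);
}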
#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */