include/linux/bitops.h
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#ifdef __KERNEL__
#define BIT(nr)			(1UL << (nr))
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BITS_PER_BYTE		8
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif
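/*
 * Illustrative sketch, not part of the original header: BIT_WORD() selects
 * which word of a multi-word bitmap holds bit @nr, and BIT_MASK() builds the
 * mask for that bit within its word.  The bitmap below is hypothetical.
 *
 *	unsigned long map[BITS_TO_LONGS(128)] = { 0 };
 *
 *	set bit 70:	map[BIT_WORD(70)] |= BIT_MASK(70);
 *	clear bit 70:	map[BIT_WORD(70)] &= ~BIT_MASK(70);
 */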
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>
#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_cont(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))
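/*
 * Illustrative sketch, not part of the original header: iterating over the
 * set bits of a small bitmap with for_each_set_bit().  Names are
 * hypothetical; with bits 0 and 4 set, the loop body runs for bit = 0 and
 * then bit = 4.
 *
 *	unsigned long mask[BITS_TO_LONGS(64)] = { 0x11UL };
 *	unsigned int bit;
 *
 *	for_each_set_bit(bit, mask, 64)
 *		handle_bit(bit);
 */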
static __inline__ int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __inline__ int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
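/*
 * Illustrative sketch, not part of the original header: get_bitmask_order()
 * is simply fls(count), while get_count_order() rounds up to the order of
 * the next power of two when count is not already one:
 *
 *	get_bitmask_order(16) == 5
 *	get_count_order(16)   == 4	(16 is already 2^4)
 *	get_count_order(17)   == 5	(rounded up to 2^5 = 32)
 */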
static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
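/*
 * Illustrative sketch, not part of the original header: hweight_long()
 * dispatches to hweight32() or hweight64() depending on the width of long
 * and returns the population count (number of set bits):
 *
 *	hweight_long(0UL)      == 0
 *	hweight_long(0xf0f0UL) == 8
 */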
/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << shift) | (word >> (64 - shift));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> shift) | (word << (64 - shift));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> shift) | (word << (32 - shift));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << shift) | (word >> (16 - shift));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> shift) | (word << (16 - shift));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << shift) | (word >> (8 - shift));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> shift) | (word << (8 - shift));
}
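/*
 * Illustrative sketch, not part of the original header: the rol*()/ror*()
 * helpers feed the bits shifted out of one end back in at the other, e.g.:
 *
 *	rol32(0x80000001, 1) == 0x00000003
 *	ror32(0x80000001, 1) == 0xc0000000
 *
 * Note that a shift of 0 makes the other half of the expression shift by the
 * full word width, which is undefined in C, so a non-zero shift smaller than
 * the word width is assumed here.
 */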
/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 */
static inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}
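/*
 * Illustrative sketch, not part of the original header: sign-extending a
 * 12-bit two's-complement field whose sign bit is at index 11:
 *
 *	sign_extend32(0x801, 11) == (__s32)0xfffff801	(negative)
 *	sign_extend32(0x7ff, 11) == 0x7ff		(positive)
 */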
static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
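/*
 * Illustrative sketch, not part of the original header: __ffs64() returns
 * the index of the least significant set bit, e.g.:
 *
 *	__ffs64(0x8ULL)                == 3
 *	__ffs64(0x0000000100000000ULL) == 32
 *
 * The result is undefined for a zero argument, as noted above.
 */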
#ifdef __KERNEL__

#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the last set bit, or size.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif
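/*
 * Illustrative sketch, not part of the original header: with only bit 4 set
 * in a hypothetical bitmap, find_last_bit() returns that index, and returns
 * @size when no bit is set in the searched range:
 *
 *	unsigned long map[BITS_TO_LONGS(64)] = { 0x10UL };
 *
 *	find_last_bit(map, 64) == 4
 *	find_last_bit(map, 4)  == 4	(no bit set below index 4, so @size)
 */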
#endif /* __KERNEL__ */

#endif