/* lib/find_bit.c (from linux/fpc-iii.git, blob e35a76b291e69e812faa266a30282a248b43d5d3) */
// SPDX-License-Identifier: GPL-2.0-or-later
/* bit search implementation
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Copyright (C) 2008 IBM Corporation
 * 'find_last_bit' is written by Rusty Russell <rusty@rustcorp.com.au>
 * (Inspired by David Howell's find_next_bit implementation)
 *
 * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
 * size and improve performance, 2015.
 */
15 #include <linux/bitops.h>
16 #include <linux/bitmap.h>
17 #include <linux/export.h>
18 #include <linux/kernel.h>
20 #if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
21 !defined(find_next_and_bit)
24 * This is a common helper function for find_next_bit, find_next_zero_bit, and
25 * find_next_and_bit. The differences are:
26 * - The "invert" argument, which is XORed with each fetched word before
27 * searching it for one bits.
28 * - The optional "addr2", which is anded with "addr1" if present.
30 static inline unsigned long _find_next_bit(const unsigned long *addr1,
31 const unsigned long *addr2, unsigned long nbits,
32 unsigned long start, unsigned long invert)
34 unsigned long tmp;
36 if (unlikely(start >= nbits))
37 return nbits;
39 tmp = addr1[start / BITS_PER_LONG];
40 if (addr2)
41 tmp &= addr2[start / BITS_PER_LONG];
42 tmp ^= invert;
44 /* Handle 1st word. */
45 tmp &= BITMAP_FIRST_WORD_MASK(start);
46 start = round_down(start, BITS_PER_LONG);
48 while (!tmp) {
49 start += BITS_PER_LONG;
50 if (start >= nbits)
51 return nbits;
53 tmp = addr1[start / BITS_PER_LONG];
54 if (addr2)
55 tmp &= addr2[start / BITS_PER_LONG];
56 tmp ^= invert;
59 return min(start + __ffs(tmp), nbits);
61 #endif
63 #ifndef find_next_bit
65 * Find the next set bit in a memory region.
67 unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
68 unsigned long offset)
70 return _find_next_bit(addr, NULL, size, offset, 0UL);
72 EXPORT_SYMBOL(find_next_bit);
73 #endif
75 #ifndef find_next_zero_bit
76 unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
77 unsigned long offset)
79 return _find_next_bit(addr, NULL, size, offset, ~0UL);
81 EXPORT_SYMBOL(find_next_zero_bit);
82 #endif
#if !defined(find_next_and_bit)
/*
 * Find the next bit set in both @addr1 and @addr2; returns @size if no
 * such bit exists at or after @offset.
 */
unsigned long find_next_and_bit(const unsigned long *addr1,
				const unsigned long *addr2, unsigned long size,
				unsigned long offset)
{
	return _find_next_bit(addr1, addr2, size, offset, 0UL);
}
EXPORT_SYMBOL(find_next_and_bit);
#endif
94 #ifndef find_first_bit
96 * Find the first set bit in a memory region.
98 unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
100 unsigned long idx;
102 for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
103 if (addr[idx])
104 return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
107 return size;
109 EXPORT_SYMBOL(find_first_bit);
110 #endif
112 #ifndef find_first_zero_bit
114 * Find the first cleared bit in a memory region.
116 unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
118 unsigned long idx;
120 for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
121 if (addr[idx] != ~0UL)
122 return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
125 return size;
127 EXPORT_SYMBOL(find_first_zero_bit);
128 #endif
130 #ifndef find_last_bit
131 unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
133 if (size) {
134 unsigned long val = BITMAP_LAST_WORD_MASK(size);
135 unsigned long idx = (size-1) / BITS_PER_LONG;
137 do {
138 val &= addr[idx];
139 if (val)
140 return idx * BITS_PER_LONG + __fls(val);
142 val = ~0ul;
143 } while (idx--);
145 return size;
147 EXPORT_SYMBOL(find_last_bit);
148 #endif
150 #ifdef __BIG_ENDIAN
152 /* include/linux/byteorder does not support "unsigned long" type */
153 static inline unsigned long ext2_swab(const unsigned long y)
155 #if BITS_PER_LONG == 64
156 return (unsigned long) __swab64((u64) y);
157 #elif BITS_PER_LONG == 32
158 return (unsigned long) __swab32((u32) y);
159 #else
160 #error BITS_PER_LONG not defined
161 #endif
164 #if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
165 static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
166 const unsigned long *addr2, unsigned long nbits,
167 unsigned long start, unsigned long invert)
169 unsigned long tmp;
171 if (unlikely(start >= nbits))
172 return nbits;
174 tmp = addr1[start / BITS_PER_LONG];
175 if (addr2)
176 tmp &= addr2[start / BITS_PER_LONG];
177 tmp ^= invert;
179 /* Handle 1st word. */
180 tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
181 start = round_down(start, BITS_PER_LONG);
183 while (!tmp) {
184 start += BITS_PER_LONG;
185 if (start >= nbits)
186 return nbits;
188 tmp = addr1[start / BITS_PER_LONG];
189 if (addr2)
190 tmp &= addr2[start / BITS_PER_LONG];
191 tmp ^= invert;
194 return min(start + __ffs(ext2_swab(tmp)), nbits);
196 #endif
198 #ifndef find_next_zero_bit_le
199 unsigned long find_next_zero_bit_le(const void *addr, unsigned
200 long size, unsigned long offset)
202 return _find_next_bit_le(addr, NULL, size, offset, ~0UL);
204 EXPORT_SYMBOL(find_next_zero_bit_le);
205 #endif
207 #ifndef find_next_bit_le
208 unsigned long find_next_bit_le(const void *addr, unsigned
209 long size, unsigned long offset)
211 return _find_next_bit_le(addr, NULL, size, offset, 0UL);
213 EXPORT_SYMBOL(find_next_bit_le);
214 #endif
216 #endif /* __BIG_ENDIAN */
/*
 * Find the next 8-bit clump containing a set bit at or after @offset.
 * On success the clump value is stored through @clump and the clump's
 * starting bit index (a multiple of 8) is returned; returns @size when
 * no set bit remains.
 */
unsigned long find_next_clump8(unsigned long *clump, const unsigned long *addr,
			       unsigned long size, unsigned long offset)
{
	unsigned long pos = find_next_bit(addr, size, offset);

	if (pos == size)
		return size;

	/* Align down to the start of the 8-bit clump holding the hit. */
	pos = round_down(pos, 8);
	*clump = bitmap_get_value8(addr, pos);
	return pos;
}
EXPORT_SYMBOL(find_next_clump8);