arch/x86/include/asm/word-at-a-time.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H

#include <linux/kernel.h>

/*
 * This is largely generic for little-endian machines, but the
 * optimal byte mask counting is probably going to be something
 * that is architecture-specific. If you have a reliably fast
 * bit count instruction, that might be better than the multiply
 * and shift, for example.
 */
struct word_at_a_time {
	const unsigned long one_bits, high_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
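
/*
 * For reference: REPEAT_BYTE(x) replicates the byte x across every byte
 * lane of an unsigned long, so on a 64-bit kernel the constants above are
 *
 *	one_bits  = REPEAT_BYTE(0x01) = 0x0101010101010101
 *	high_bits = REPEAT_BYTE(0x80) = 0x8080808080808080
 *
 * i.e. exactly the two constants needed by the classic
 * "(a - one_bits) & ~a & high_bits" zero-byte test in has_zero() below.
 */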

#ifdef CONFIG_64BIT

/*
 * Jan Achrenius on G+: microoptimized version of
 * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
 * that works for the bytemasks without having to
 * mask them first.
 */
static inline long count_masked_bytes(unsigned long mask)
{
	return mask*0x0001020304050608ul >> 56;
}
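
/*
 * Worked example: find_zero() below feeds this a bytemask with 0xff in
 * every byte lane preceding the first zero byte.  For
 *
 *	mask = 0x0000000000ffffff	(first zero byte at index 3)
 *
 * the product is 0x0303030303faf9f8, so shifting right by 56 leaves 3,
 * the byte index of the zero.
 */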

#else	/* 32-bit case */

/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
static inline long count_masked_bytes(long mask)
{
	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
	long a = (0x0ff0001+mask) >> 23;
	/* Fix the 1 for 00 case */
	return a & mask;
}
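
/*
 * Worked example: for the four possible bytemasks the add-and-shift
 * yields a = 1, 1, 2, 3, and the final "& mask" turns the first case
 * (mask == 0, zero byte at index 0) back into 0:
 *
 *	mask = 0x00000000 -> a = 1 -> returns 0
 *	mask = 0x000000ff -> a = 1 -> returns 1
 *	mask = 0x0000ffff -> a = 2 -> returns 2
 *	mask = 0x00ffffff -> a = 3 -> returns 3
 */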

#endif

/* Return nonzero if it has a zero */
static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
{
	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
	*bits = mask;
	return mask;
}
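
/*
 * Illustration: for the little-endian word a = 0x6867666500636261 (the
 * bytes "abc\0efgh", NUL at byte index 3), subtracting one_bits borrows
 * through the zero byte, "& ~a" rejects bytes that merely had their high
 * bit set already, and "& high_bits" leaves
 *
 *	mask = 0x0000000080000000
 *
 * i.e. bit 7 of the byte lane holding the zero.
 */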

/*
 * On little-endian x86 the mask from has_zero() is already in the form
 * the rest of the code wants, so this is a no-op; the hook exists because
 * other architectures need a separate preparation step here.
 */
static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
{
	return bits;
}

static inline unsigned long create_zero_mask(unsigned long bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}
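
/*
 * Illustration: continuing the example above, bits = 0x0000000080000000
 * gives (bits - 1) & ~bits = 0x000000007fffffff, keeping exactly the bits
 * below the first set bit; shifting right by 7 turns that into the
 * bytemask 0x0000000000ffffff covering every byte before the zero.
 */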

/* The mask we created is directly usable as a bytemask */
#define zero_bytemask(mask) (mask)

static inline unsigned long find_zero(unsigned long mask)
{
	return count_masked_bytes(mask);
}
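
/*
 * Putting the pieces together: a word-at-a-time strlen() built on this
 * API would look roughly as follows.  This is a minimal sketch for
 * illustration only (wordwise_strlen is a hypothetical name; real
 * in-tree users are e.g. strscpy() in lib/string.c and hash_name() in
 * fs/namei.c):
 *
 *	size_t wordwise_strlen(const char *str)
 *	{
 *		const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 *		unsigned long word, bits;
 *		size_t len = 0;
 *
 *		for (;;) {
 *			word = load_unaligned_zeropad(str + len);
 *			if (has_zero(word, &bits, &constants)) {
 *				bits = prep_zero_mask(word, bits, &constants);
 *				bits = create_zero_mask(bits);
 *				return len + find_zero(bits);
 *			}
 *			len += sizeof(unsigned long);
 *		}
 *	}
 */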

/*
 * Load an unaligned word from kernel space.
 *
 * In the (very unlikely) case of the word being a page-crosser
 * and the next page not being mapped, take the exception and
 * return zeroes in the non-existing part.
 */
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
	unsigned long ret, dummy;

	asm(
		"1:\tmov %2,%0\n"		/* plain load; may fault on a page-crosser */
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\t"
		"lea %2,%1\n\t"			/* faulting address ... */
		"and %3,%1\n\t"			/* ... rounded down to a word boundary */
		"mov (%1),%0\n\t"		/* aligned reload, entirely in the mapped page */
		"leal %2,%%ecx\n\t"
		"andl %4,%%ecx\n\t"		/* misalignment in bytes ... */
		"shll $3,%%ecx\n\t"		/* ... converted to bits */
		"shr %%cl,%0\n\t"		/* shift valid bytes down, zero-fill the rest */
		"jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(1b, 3b)
		:"=&r" (ret),"=&c" (dummy)
		:"m" (*(unsigned long *)addr),
		 "i" (-sizeof(unsigned long)),
		 "i" (sizeof(unsigned long)-1));
	return ret;
}
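
/*
 * Illustration of the fixup path above: with an 8-byte word and
 * addr = ...0xffd in a page whose successor is unmapped, the first load
 * faults, the fixup reloads from the aligned address ...0xff8, and
 * shifting right by (0xffd & 7) * 8 = 40 bits leaves the three mapped
 * bytes 0xffd-0xfff in the low lanes of the result, with zeroes above.
 */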

#endif /* _ASM_WORD_AT_A_TIME_H */