1 #ifndef __ASM_ARM_WORD_AT_A_TIME_H
2 #define __ASM_ARM_WORD_AT_A_TIME_H
/*
 * Little-endian word-at-a-time zero byte handling.
 * Heavily based on the x86 algorithm.
 */
10 #include <linux/kernel.h>
/*
 * Per-call constants for the word-at-a-time string helpers:
 * one_bits has 0x01 in every byte, high_bits has 0x80 in every byte.
 * Initialized from WORD_AT_A_TIME_CONSTANTS by the callers.
 */
struct word_at_a_time {
	const unsigned long one_bits, high_bits;
};
16 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
18 static inline unsigned long has_zero(unsigned long a
, unsigned long *bits
,
19 const struct word_at_a_time
*c
)
21 unsigned long mask
= ((a
- c
->one_bits
) & ~a
) & c
->high_bits
;
26 #define prep_zero_mask(a, bits, c) (bits)
/*
 * create_zero_mask() - turn the has_zero() bit pattern into a byte mask.
 *
 * (bits - 1) & ~bits keeps exactly the bits below the lowest set bit,
 * i.e. all bit positions before the first zero byte; shifting right by 7
 * turns that into 0xff per preceding byte (0 / 0xff / 0xffff / ...),
 * which find_zero() then converts into a byte index.
 */
static inline unsigned long create_zero_mask(unsigned long bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}
/*
 * find_zero() - return the byte index of the first zero byte, given the
 * mask from create_zero_mask() (0 -> 0, 0xff -> 1, 0xffff -> 2, ...).
 */
static inline unsigned long find_zero(unsigned long mask)
{
	unsigned long ret;

#if __LINUX_ARM_ARCH__ >= 5
	/* We have clz available. */
	asm("clz\t%0, %1" : "=r" (ret) : "r" (mask));
	/* clz(0)=32, clz(0xff)=24, ... -> 0, 1, 2, 3 */
	ret = (32 - ret) >> 3;
#else	/* __LINUX_ARM_ARCH__ < 5 */
	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
	ret = (0x0ff0001 + mask) >> 23;
	/* Fix the 1 for 00 case */
	ret &= mask;
#endif
	return ret;
}
51 #ifdef CONFIG_DCACHE_WORD_ACCESS
53 #define zero_bytemask(mask) (mask)
/*
 * Load an unaligned word from kernel space.
 *
 * In the (very unlikely) case of the word being a page-crosser
 * and the next page not being mapped, take the exception and
 * return zeroes in the non-existing part.
 */
62 static inline unsigned long load_unaligned_zeropad(const void *addr
)
64 unsigned long ret
, offset
;
66 /* Load word from unaligned pointer addr */
70 " .pushsection .fixup,\"ax\"\n"
72 "3: and %1, %2, #0x3\n"
79 " .pushsection __ex_table,\"a\"\n"
83 : "=&r" (ret
), "=&r" (offset
)
84 : "r" (addr
), "Qo" (*(unsigned long *)addr
));
90 #endif /* DCACHE_WORD_ACCESS */
93 #include <asm-generic/word-at-a-time.h>
96 #endif /* __ASM_ARM_WORD_AT_A_TIME_H */