#ifndef _I386_BYTEORDER_H
#define _I386_BYTEORDER_H

#include <asm/types.h>
#include <linux/compiler.h>

#ifdef __GNUC__

/* For avoiding bswap on i386 */
#ifdef __KERNEL__
#endif
static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
#ifdef CONFIG_X86_BSWAP
	__asm__("bswap %0" : "=r" (x) : "0" (x));
#else
	__asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */
		"rorl $16,%0\n\t" /* swap words */
		"xchgb %b0,%h0" /* swap higher bytes */
		:"=q" (x)
		: "0" (x));
#endif
	return x;
}
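
/*
 * Illustrative note (not part of the original header): both paths give
 * the same result, e.g. ___arch__swab32(0x12345678) == 0x78563412.
 * The non-bswap sequence gets there in three steps:
 *
 *	0x12345678  xchgb %b0,%h0  ->  0x12347856
 *	0x12347856  rorl $16,%0    ->  0x78561234
 *	0x78561234  xchgb %b0,%h0  ->  0x78563412
 *
 * CONFIG_X86_BSWAP is set for 486 and later CPUs; the plain 386 lacks
 * the bswap instruction, hence the fallback.
 */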
static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val)
{
	union {
		struct { __u32 a,b; } s;
		__u64 u;
	} v;
	v.u = val;
#ifdef CONFIG_X86_BSWAP
	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
	    : "=r" (v.s.a), "=r" (v.s.b)
	    : "0" (v.s.a), "1" (v.s.b));
#else
	v.s.a = ___arch__swab32(v.s.a);
	v.s.b = ___arch__swab32(v.s.b);
	asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
#endif
	return v.u;
}
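
/*
 * Worked example (illustrative): ___arch__swab64(0x0123456789abcdefULL)
 * returns 0xefcdab8967452301ULL.  On little-endian i386 the union puts
 * the low 32 bits of val in v.s.a and the high 32 bits in v.s.b; each
 * half is byte-swapped, then the halves are exchanged, which together
 * reverse all eight bytes.
 */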
/* Do not define swab16.  Gcc is smart enough to recognize the "C" version
   and convert it into a rotation or exchange.  */
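
/*
 * A minimal sketch (an assumption, not taken from this header) of the
 * "C" version referred to above, which gcc compiles down to a single
 * rotate:
 *
 *	static __inline__ __attribute_const__ __u16 ___swab16_example(__u16 x)
 *	{
 *		return (x << 8) | (x >> 8);
 *	}
 *
 * The name ___swab16_example is hypothetical; the real generic helper
 * lives in linux/byteorder/swab.h.
 */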
#define __arch__swab64(x) ___arch__swab64(x)
#define __arch__swab32(x) ___arch__swab32(x)

#define __BYTEORDER_HAS_U64__

#endif /* __GNUC__ */

#include <linux/byteorder/little_endian.h>
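
/*
 * Example (illustrative): with the __arch__swab macros above in place,
 * the generic helpers pulled in here resolve to them, so on i386
 * cpu_to_be32(0x12345678) returns 0x78563412, while cpu_to_le32() is a
 * no-op since the CPU is already little-endian.
 */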
#endif /* _I386_BYTEORDER_H */