[mascara-docs.git] i386/linux/linux-2.3.21/include/asm-alpha/byteorder.h
#ifndef _ALPHA_BYTEORDER_H
#define _ALPHA_BYTEORDER_H

#include <asm/types.h>

/* EGCS 1.1 can, without scheduling, do just as good as we do here
   with the standard macros.  And since it can schedule, it does even
   better in the end.  */

#if defined(__GNUC__) && __GNUC_MINOR__ < 91
static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
{
        __u64 t1, t2, t3;
        /* Break the final or's out of the block so that gcc can
           schedule them at will.  Further, use add not or so that
           we elide the sign extend gcc will put in because the
           return type is not a long. */

        __asm__(
        "insbl  %3,3,%1         # %1 = dd000000\n\t"
        "zapnot %3,2,%2         # %2 = 0000cc00\n\t"
        "sll    %2,8,%2         # %2 = 00cc0000\n\t"
        "or     %2,%1,%1        # %1 = ddcc0000\n\t"
        "zapnot %3,4,%2         # %2 = 00bb0000\n\t"
        "extbl  %3,3,%0         # %0 = 000000aa\n\t"
        "srl    %2,8,%2         # %2 = 0000bb00"
        : "=r"(t3), "=&r"(t1), "=&r"(t2)
        : "r"(x));
        return t3 + t2 + t1;
}
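/* For comparison, a portable C equivalent of the swap above (an
   illustrative sketch only; the function name is made up and is not
   part of this header): */
static __inline__ __u32 ___example_generic_swab32(__u32 x)
{
        /* 0xaabbccdd -> 0xddccbbaa */
        return ((x & 0xff000000) >> 24) |
               ((x & 0x00ff0000) >>  8) |
               ((x & 0x0000ff00) <<  8) |
               ((x & 0x000000ff) << 24);
}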
static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
{
        __u64 t1, t2;
        __asm__(
        "insbl  %2,1,%1         # %1 = bb00\n\t"
        "extbl  %2,1,%0         # %0 = 00aa"
        : "=r"(t1), "=&r"(t2) : "r"(x));
        return t1 | t2;
}
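/* Likewise, a portable C form of the 16-bit swap (illustrative sketch
   only; the name is made up and not part of this header): */
static __inline__ __u16 ___example_generic_swab16(__u16 x)
{
        /* 0xaabb -> 0xbbaa */
        return ((x & 0xff) << 8) | (x >> 8);
}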
#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab16(x) ___arch__swab16(x)

#endif /* __GNUC__ */

#define __BYTEORDER_HAS_U64__

#include <linux/byteorder/little_endian.h>
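/* Usage sketch (an illustration, not part of the original header):
   Alpha is little-endian, so the generic helpers pulled in above are
   expected to make __cpu_to_le32() a no-op while __cpu_to_be32()
   performs the byte swap, e.g. 0xaabbccdd -> 0xddccbbaa.  The function
   name below is made up for the example. */
static __inline__ __u32 example_to_network_order(__u32 host_val)
{
        return __cpu_to_be32(host_val); /* byte-swap on little-endian Alpha */
}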
#endif /* _ALPHA_BYTEORDER_H */