1 #ifndef _PPC_BYTEORDER_H
2 #define _PPC_BYTEORDER_H
5 #include <linux/compiler.h>
/*
 * Load a 16-bit value from addr with the two bytes swapped, i.e. read a
 * little-endian halfword on this big-endian CPU.  lhbrx is the PowerPC
 * "load halfword byte-reverse indexed" instruction; the dummy "m" (*addr)
 * input tells the compiler the memory at addr is read, so the access is
 * not reordered or cached across the asm.
 */
extern __inline__ unsigned ld_le16(const volatile unsigned short *addr)
{
	unsigned val;

	__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
	return val;
}
/*
 * Store the low 16 bits of val at addr with the two bytes swapped, i.e.
 * write a little-endian halfword.  sthbrx is "store halfword byte-reverse
 * indexed"; the "=m" (*addr) output tells the compiler the pointed-to
 * memory is written by the asm.
 */
extern __inline__ void st_le16(volatile unsigned short *addr, const unsigned val)
{
	__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
}
/*
 * Load a 32-bit value from addr with all four bytes reversed, i.e. read a
 * little-endian word.  lwbrx is "load word byte-reverse indexed"; the
 * dummy "m" (*addr) input marks the memory as read by the asm.
 */
extern __inline__ unsigned ld_le32(const volatile unsigned *addr)
{
	unsigned val;

	__asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
	return val;
}
/*
 * Store val at addr with all four bytes reversed, i.e. write a
 * little-endian word.  stwbrx is "store word byte-reverse indexed"; the
 * "=m" (*addr) output marks the pointed-to memory as written.
 */
extern __inline__ void st_le32(volatile unsigned *addr, const unsigned val)
{
	__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
}
36 static __inline__ __attribute_const__ __u16
___arch__swab16(__u16 value
)
40 __asm__("rlwimi %0,%2,8,16,23" : "=&r" (result
) : "0" (value
>> 8), "r" (value
));
44 static __inline__ __attribute_const__ __u32
___arch__swab32(__u32 value
)
48 __asm__("rlwimi %0,%2,24,16,23" : "=&r" (result
) : "0" (value
>>24), "r" (value
));
49 __asm__("rlwimi %0,%2,8,8,15" : "=&r" (result
) : "0" (result
), "r" (value
));
50 __asm__("rlwimi %0,%2,24,0,7" : "=&r" (result
) : "0" (result
), "r" (value
));
/* Hook the asm implementations above into the generic byteorder machinery. */
#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab16(x) ___arch__swab16(x)

/* The same, but returns converted value from the location pointer by addr. */
#define __arch__swab16p(addr) ld_le16(addr)
#define __arch__swab32p(addr) ld_le32(addr)

/* The same, but do the conversion in situ, ie. put the value back to addr. */
#define __arch__swab16s(addr) st_le16(addr,*addr)
#define __arch__swab32s(addr) st_le32(addr,*addr)
65 #endif /* __KERNEL__ */
67 #if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
68 # define __BYTEORDER_HAS_U64__
69 # define __SWAB_64_THRU_32__
74 #include <linux/byteorder/big_endian.h>
76 #endif /* _PPC_BYTEORDER_H */