include/asm-alpha/compiler.h
#ifndef __ALPHA_COMPILER_H
#define __ALPHA_COMPILER_H

/*
 * Herein are macros we use when describing various patterns we want to
 * give GCC.  In all cases we can get better schedules out of the compiler
 * if we hide as little as possible inside inline assembly.  However, we
 * want to be able to know what we'll get out before giving up inline
 * assembly.  Thus these tests and macros.
 */

#if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3
# define __kernel_insbl(val, shift) __builtin_alpha_insbl(val, shift)
# define __kernel_inswl(val, shift) __builtin_alpha_inswl(val, shift)
# define __kernel_insql(val, shift) __builtin_alpha_insql(val, shift)
# define __kernel_inslh(val, shift) __builtin_alpha_inslh(val, shift)
# define __kernel_extbl(val, shift) __builtin_alpha_extbl(val, shift)
# define __kernel_extwl(val, shift) __builtin_alpha_extwl(val, shift)
# define __kernel_cmpbge(a, b) __builtin_alpha_cmpbge(a, b)
# define __kernel_cttz(x) __builtin_ctzl(x)
# define __kernel_ctlz(x) __builtin_clzl(x)
# define __kernel_ctpop(x) __builtin_popcountl(x)
#else
# define __kernel_insbl(val, shift) \
  ({ unsigned long __kir; \
     __asm__("insbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
     __kir; })
# define __kernel_inswl(val, shift) \
  ({ unsigned long __kir; \
     __asm__("inswl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
     __kir; })
# define __kernel_insql(val, shift) \
  ({ unsigned long __kir; \
     __asm__("insql %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
     __kir; })
# define __kernel_inslh(val, shift) \
  ({ unsigned long __kir; \
     __asm__("inslh %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
     __kir; })
# define __kernel_extbl(val, shift) \
  ({ unsigned long __kir; \
     __asm__("extbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
     __kir; })
# define __kernel_extwl(val, shift) \
  ({ unsigned long __kir; \
     __asm__("extwl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
     __kir; })
# define __kernel_cmpbge(a, b) \
  ({ unsigned long __kir; \
     __asm__("cmpbge %r2,%1,%0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \
     __kir; })
# define __kernel_cttz(x) \
  ({ unsigned long __kir; \
     __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x)); \
     __kir; })
# define __kernel_ctlz(x) \
  ({ unsigned long __kir; \
     __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x)); \
     __kir; })
# define __kernel_ctpop(x) \
  ({ unsigned long __kir; \
     __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x)); \
     __kir; })
#endif
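
/*
 * Illustrative sketch, not part of the original header: one typical way
 * the extract macros above get combined.  The helper name
 * __example_read_u8 is hypothetical.  Without byte loads, a single byte
 * is read by loading the aligned quadword that contains it and letting
 * extbl pick out the byte selected by the low three address bits.
 */
static inline unsigned char __example_read_u8(const void *addr)
{
	/* Load the enclosing 8-byte-aligned quadword. */
	unsigned long quad = *(const unsigned long *)((unsigned long)addr & ~7UL);
	/* Extract the byte at position (addr & 7) within that quadword. */
	return __kernel_extbl(quad, (unsigned long)addr & 7);
}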

/*
 * Beginning with EGCS 1.1, GCC defines __alpha_bwx__ when the BWX
 * extension is enabled.  Previous versions did not define anything
 * we could test during compilation -- too bad, so sad.
 */

#if defined(__alpha_bwx__)
#define __kernel_ldbu(mem) (mem)
#define __kernel_ldwu(mem) (mem)
#define __kernel_stb(val,mem) ((mem) = (val))
#define __kernel_stw(val,mem) ((mem) = (val))
#else
#define __kernel_ldbu(mem) \
  ({ unsigned char __kir; \
     __asm__("ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \
     __kir; })
#define __kernel_ldwu(mem) \
  ({ unsigned short __kir; \
     __asm__("ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \
     __kir; })
#define __kernel_stb(val,mem) \
  __asm__("stb %1,%0" : "=m"(mem) : "r"(val))
#define __kernel_stw(val,mem) \
  __asm__("stw %1,%0" : "=m"(mem) : "r"(val))
#endif
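
/*
 * Illustrative sketch, not part of the original header: how a caller
 * might use the byte-access wrappers above so the same source works with
 * or without BWX.  The helper names are hypothetical; the pattern of
 * passing a dereferenced byte lvalue matches what the macros' "m"
 * constraints (and the plain-assignment BWX variants) expect.
 */
static inline unsigned char __example_get_u8(const volatile void *addr)
{
	/* Plain load with BWX, explicit ldbu otherwise. */
	return __kernel_ldbu(*(const volatile unsigned char *)addr);
}

static inline void __example_put_u8(unsigned char val, volatile void *addr)
{
	/* Plain store with BWX, explicit stb otherwise. */
	__kernel_stb(val, *(volatile unsigned char *)addr);
}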

/* Some idiots over in <linux/compiler.h> thought inline should imply
   always_inline.  This breaks stuff.  We'll include this file whenever
   we run into such problems.  */

#include <linux/compiler.h>
#undef inline
#undef __inline__
#undef __inline
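
/*
 * Illustrative sketch, hypothetical name, not part of the original
 * header: with the #undefs above in effect, "inline" is back to being an
 * ordinary hint, so GCC may emit an out-of-line copy of a function like
 * this one rather than failing when the body cannot be inlined, which is
 * what the always_inline attribute would demand.
 */
static inline unsigned long __example_popcount(unsigned long x)
{
	return __kernel_ctpop(x);
}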

#endif /* __ALPHA_COMPILER_H */