kmemtrace: SLOB hooks.
[linux-2.6/kmemtrace.git] include/asm-powerpc/asm-compat.h
#ifndef _ASM_POWERPC_ASM_COMPAT_H
#define _ASM_POWERPC_ASM_COMPAT_H

#include <asm/types.h>

#ifdef __ASSEMBLY__
#  define stringify_in_c(...)	__VA_ARGS__
#  define ASM_CONST(x)		x
#else
/* This version of stringify will deal with commas... */
#  define __stringify_in_c(...)	#__VA_ARGS__
#  define stringify_in_c(...)	__stringify_in_c(__VA_ARGS__) " "
#  define __ASM_CONST(x)	x##UL
#  define ASM_CONST(x)		__ASM_CONST(x)
#endif
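
/*
 * Usage sketch (added for illustration; not part of the original header):
 * ASM_CONST() lets one constant definition serve both assembler and C.
 * In assembly the value is emitted verbatim; in C it gains a UL suffix so
 * it carries the width of a long.  EXAMPLE_BASE is a hypothetical name.
 */
#if 0	/* illustration only */
#define EXAMPLE_BASE	ASM_CONST(0xc000000000000000)
/* under __ASSEMBLY__:  EXAMPLE_BASE -> 0xc000000000000000   */
/* in C:                EXAMPLE_BASE -> 0xc000000000000000UL */
#endif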

/*
 * Feature section common macros
 *
 * Note that the entries now contain offsets between the table entry
 * and the code rather than absolute code pointers in order to be
 * usable with the vdso shared library. There is also an assumption
 * that values will be negative, that is, the fixup table has to be
 * located after the code it fixes up.
 */
#ifdef CONFIG_PPC64
#ifdef __powerpc64__
/* 64-bit kernel, 64-bit code */
#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)	\
99:							\
	.section sect,"a";				\
	.align 3;					\
98:							\
	.llong msk;					\
	.llong val;					\
	.llong label##b-98b;				\
	.llong 99b-98b;					\
	.previous
#else /* __powerpc64__ */
/* 64-bit kernel, 32-bit code (i.e. vdso32) */
#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)	\
99:							\
	.section sect,"a";				\
	.align 3;					\
98:							\
	.llong msk;					\
	.llong val;					\
	.long 0xffffffff;				\
	.long label##b-98b;				\
	.long 0xffffffff;				\
	.long 99b-98b;					\
	.previous
#endif /* !__powerpc64__ */
#else /* CONFIG_PPC64 */
/* 32-bit kernel, 32-bit code */
#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)	\
99:							\
	.section sect,"a";				\
	.align 2;					\
98:							\
	.long msk;					\
	.long val;					\
	.long label##b-98b;				\
	.long 99b-98b;					\
	.previous
#endif /* !CONFIG_PPC64 */
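
/*
 * Illustrative sketch (added for illustration; not part of the original
 * header): a fixup walker consuming these entries recovers absolute code
 * addresses by adding the stored offsets to the address of the table entry
 * itself, which is why the offsets are taken relative to label 98.  The
 * struct and function names below are hypothetical; the real walker lives
 * in the kernel's feature-fixup code.
 */
#if 0	/* illustration only */
struct ftr_entry {
	unsigned long	mask;		/* msk: feature bits to test	*/
	unsigned long	value;		/* val: expected masked value	*/
	long		start_off;	/* label##b - 98b (negative)	*/
	long		end_off;	/* 99b - 98b (negative)		*/
};

static void apply_ftr_fixup(unsigned long cur_features,
			    struct ftr_entry *entry)
{
	/* Turn entry-relative offsets back into absolute code addresses */
	void *start = (void *)entry + entry->start_off;
	void *end   = (void *)entry + entry->end_off;

	if ((cur_features & entry->mask) == entry->value)
		return;		/* feature present: keep the code as-is */
	/* otherwise patch [start, end) with nops (omitted here) */
}
#endif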

#ifdef __powerpc64__

/* operations for longs and pointers */
#define PPC_LL		stringify_in_c(ld)
#define PPC_STL		stringify_in_c(std)
#define PPC_LCMPI	stringify_in_c(cmpdi)
#define PPC_LONG	stringify_in_c(.llong)
#define PPC_TLNEI	stringify_in_c(tdnei)
#define PPC_LLARX	stringify_in_c(ldarx)
#define PPC_STLCX	stringify_in_c(stdcx.)
#define PPC_CNTLZL	stringify_in_c(cntlzd)

/* Move to CR, single-entry optimized version. Only available
 * on POWER4 and later.
 */
#ifdef CONFIG_POWER4_ONLY
#define PPC_MTOCRF	stringify_in_c(mtocrf)
#else
#define PPC_MTOCRF	stringify_in_c(mtcrf)
#endif

#else /* 32-bit */

/* operations for longs and pointers */
#define PPC_LL		stringify_in_c(lwz)
#define PPC_STL		stringify_in_c(stw)
#define PPC_LCMPI	stringify_in_c(cmpwi)
#define PPC_LONG	stringify_in_c(.long)
#define PPC_TLNEI	stringify_in_c(twnei)
#define PPC_LLARX	stringify_in_c(lwarx)
#define PPC_STLCX	stringify_in_c(stwcx.)
#define PPC_CNTLZL	stringify_in_c(cntlzw)
#define PPC_MTOCRF	stringify_in_c(mtcrf)

#endif
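
/*
 * Usage sketch (added for illustration; not part of the original header):
 * these strings concatenate into inline-asm templates that assemble
 * correctly for either word size.  stringify_in_c() appends a trailing
 * space, so no separator is needed between mnemonic and operands.  The
 * function below is hypothetical.
 */
#if 0	/* illustration only */
static inline unsigned long load_native_long(unsigned long *p)
{
	unsigned long val;

	/* Expands to "ld %0,0(%1)" on 64-bit, "lwz %0,0(%1)" on 32-bit */
	__asm__ __volatile__(PPC_LL "%0,0(%1)" : "=r" (val) : "r" (p));
	return val;
}
#endif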

#ifdef __KERNEL__
#ifdef CONFIG_IBM405_ERR77
/* Erratum #77 on the 405 means we need a sync or dcbt before every
 * stwcx.  The old ATOMIC_SYNC_FIX covered some but not all of this.
 */
#define PPC405_ERR77(ra,rb)	stringify_in_c(dcbt	ra, rb;)
#define PPC405_ERR77_SYNC	stringify_in_c(sync;)
#else
#define PPC405_ERR77(ra,rb)
#define PPC405_ERR77_SYNC
#endif
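
/*
 * Usage sketch (added for illustration; not part of the original header):
 * the workaround macro is dropped in immediately before the conditional
 * store in atomic sequences, expanding to a dcbt on affected parts and to
 * nothing elsewhere.  The function below is a hypothetical, simplified
 * atomic increment.
 */
#if 0	/* illustration only */
static inline void atomic_inc_sketch(volatile int *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n"	/* load-reserve the counter	*/
"	addic	%0,%0,1\n"	/* increment			*/
	PPC405_ERR77(0, %2)	/* 405 erratum #77 workaround	*/
"	stwcx.	%0,0,%2\n"	/* store-conditional		*/
"	bne-	1b"		/* retry if reservation lost	*/
	: "=&r" (t), "+m" (*v)
	: "r" (v)
	: "cc");
}
#endif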
#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_ASM_COMPAT_H */