/* none/tests/s390x/xor.h */

#include <stdio.h>

/* Dummy variable. Needed to work around GCC code generation bugs */
volatile long v;

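/* Each macro below executes one XOR flavour and captures the resulting
   condition code: "ipm" inserts the CC and program mask into bits 32-39
   of the register, and shifting right by 28 leaves the CC value (0-3)
   in the low bits. */
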
/* XOR a register with an operand in memory (e.g. X, XG) */
#define XOR_REG_MEM(insn, s1, s2)                       \
({                                                      \
   unsigned long tmp = s1;                              \
   int cc;                                              \
   asm volatile(  #insn " %0, %3\n"                     \
                  "ipm %1\n"                            \
                  "srl %1,28\n"                         \
                  : "+d" (tmp), "=d" (cc)               \
                  : "d" (tmp), "Q" (s2)                 \
                  : "0", "cc");                         \
   printf(#insn " %16.16lX ^ %16.16lX = %16.16lX (cc=%d)\n", s1, s2, tmp, cc); \
})

/* XOR two registers (e.g. XR, XGR) */
#define XOR_REG_REG(insn, s1, s2)                       \
({                                                      \
   unsigned long tmp = s1;                              \
   int cc;                                              \
   asm volatile(  #insn " %0, %3\n"                     \
                  "ipm %1\n"                            \
                  "srl %1,28\n"                         \
                  : "+d" (tmp), "=d" (cc)               \
                  : "d" (tmp), "d" (s2)                 \
                  : "0", "cc");                         \
   printf(#insn " %16.16lX ^ %16.16lX = %16.16lX (cc=%d)\n", s1, s2, tmp, cc); \
})

/* XOR a register with an immediate; "insn" is a function-like
   encoding macro taking (register number, immediate) */
#define XOR_REG_IMM(insn, s1, s2)                       \
({                                                      \
   register unsigned long tmp asm("2") = s1;            \
   int cc;                                              \
   asm volatile(  insn(2,s2)                            \
                  "ipm %1\n"                            \
                  "srl %1,28\n"                         \
                  : "+d" (tmp), "=d" (cc)               \
                  : "d" (tmp)                           \
                  : "cc");                              \
   v = tmp; /* work around GCC code gen bug */          \
   printf(#insn " %16.16lX ^ %16.16lX = %16.16lX (cc=%d)\n", s1, (unsigned long) 0x##s2, v, cc); \
})

/* XOR an operand in memory with an immediate (e.g. XI) */
#define XOR_MEM_IMM(insn, s1, s2)                       \
({                                                      \
   unsigned long tmp = s1;                              \
   int cc;                                              \
   asm volatile(  #insn " %0," #s2 "\n"                 \
                  "ipm %1\n"                            \
                  "srl %1,28\n"                         \
                  : "+Q" (tmp), "=d" (cc)               \
                  : "Q" (tmp)                           \
                  : "0", "cc");                         \
   printf(#insn " %16.16lX ^ %16.16lX = %16.16lX (cc=%d)\n", s1, (unsigned long) s2, tmp, cc); \
})

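/* The *sweep macros below apply one instruction to a fixed set of first
   operands covering zero, small values, the 16/32/64-bit sign and
   all-ones boundaries, and alternating bit patterns. */
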
#define memsweep(i, s2)                                 \
({                                                      \
   XOR_REG_MEM(i, 0ul, s2);                             \
   XOR_REG_MEM(i, 1ul, s2);                             \
   XOR_REG_MEM(i, 0xfffful, s2);                        \
   XOR_REG_MEM(i, 0x7ffful, s2);                        \
   XOR_REG_MEM(i, 0x8000ul, s2);                        \
   XOR_REG_MEM(i, 0xfffffffful, s2);                    \
   XOR_REG_MEM(i, 0x80000000ul, s2);                    \
   XOR_REG_MEM(i, 0x7ffffffful, s2);                    \
   XOR_REG_MEM(i, 0xaaaaaaaaaaaaaaaaul, s2);            \
   XOR_REG_MEM(i, 0x8000000000000000ul, s2);            \
   XOR_REG_MEM(i, 0xfffffffffffffffful, s2);            \
   XOR_REG_MEM(i, 0x5555555555555555ul, s2);            \
})

#define regsweep(i, s2)                                 \
({                                                      \
   XOR_REG_REG(i, 0ul, s2);                             \
   XOR_REG_REG(i, 1ul, s2);                             \
   XOR_REG_REG(i, 0xfffful, s2);                        \
   XOR_REG_REG(i, 0x7ffful, s2);                        \
   XOR_REG_REG(i, 0x8000ul, s2);                        \
   XOR_REG_REG(i, 0xfffffffful, s2);                    \
   XOR_REG_REG(i, 0x80000000ul, s2);                    \
   XOR_REG_REG(i, 0x7ffffffful, s2);                    \
   XOR_REG_REG(i, 0xaaaaaaaaaaaaaaaaul, s2);            \
   XOR_REG_REG(i, 0x8000000000000000ul, s2);            \
   XOR_REG_REG(i, 0xfffffffffffffffful, s2);            \
   XOR_REG_REG(i, 0x5555555555555555ul, s2);            \
})

#define immsweep(i, s2)                                 \
({                                                      \
   XOR_REG_IMM(i, 0ul, s2);                             \
   XOR_REG_IMM(i, 1ul, s2);                             \
   XOR_REG_IMM(i, 0xfffful, s2);                        \
   XOR_REG_IMM(i, 0x7ffful, s2);                        \
   XOR_REG_IMM(i, 0x8000ul, s2);                        \
   XOR_REG_IMM(i, 0xfffffffful, s2);                    \
   XOR_REG_IMM(i, 0x80000000ul, s2);                    \
   XOR_REG_IMM(i, 0x7ffffffful, s2);                    \
   XOR_REG_IMM(i, 0xaaaaaaaaaaaaaaaaul, s2);            \
   XOR_REG_IMM(i, 0x8000000000000000ul, s2);            \
   XOR_REG_IMM(i, 0xfffffffffffffffful, s2);            \
   XOR_REG_IMM(i, 0x5555555555555555ul, s2);            \
})

#define memimmsweep(i, s2)                              \
({                                                      \
   XOR_MEM_IMM(i, 0ul, s2);                             \
   XOR_MEM_IMM(i, 1ul, s2);                             \
   XOR_MEM_IMM(i, 0xfffful, s2);                        \
   XOR_MEM_IMM(i, 0x7ffful, s2);                        \
   XOR_MEM_IMM(i, 0x8000ul, s2);                        \
   XOR_MEM_IMM(i, 0xfffffffful, s2);                    \
   XOR_MEM_IMM(i, 0x80000000ul, s2);                    \
   XOR_MEM_IMM(i, 0x7ffffffful, s2);                    \
   XOR_MEM_IMM(i, 0xaaaaaaaaaaaaaaaaul, s2);            \
   XOR_MEM_IMM(i, 0x8000000000000000ul, s2);            \
   XOR_MEM_IMM(i, 0xfffffffffffffffful, s2);            \
   XOR_MEM_IMM(i, 0x5555555555555555ul, s2);            \
})

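/* The XY() and XIY() encoding macros used below are not defined in this
   header; presumably they come from the including test (e.g. the shared
   opcodes.h), emitting the long-displacement RXY/SIY forms of X and XI. */
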
/* XOR a register with a memory operand, long-displacement XY encoding */
#define XOR_XY(s1, s2)                                  \
({                                                      \
   register unsigned long tmp asm("1") = s1;            \
   register unsigned long *addr asm("2") = &s2;         \
   int cc;                                              \
   asm volatile(  XY(1,0,2,000,00)                      \
                  "ipm %1\n"                            \
                  "srl %1,28\n"                         \
                  : "+d" (tmp), "=d" (cc)               \
                  : "d" (tmp), "d" (addr)               \
                  : "cc");                              \
   printf("xy %16.16lX ^ %16.16lX = %16.16lX (cc=%d)\n", s1, s2, tmp, cc); \
})

/* XOR an operand in memory with an immediate, long-displacement XIY encoding */
#define XOR_XIY(s1, i2)                                 \
({                                                      \
   unsigned long tmp = s1;                              \
   register unsigned long *addr asm("2") = &tmp;        \
   int cc;                                              \
   asm volatile(  XIY(i2,2,000,00)                      \
                  "ipm %1\n"                            \
                  "srl %1,28\n"                         \
                  : "+Q" (tmp), "=d" (cc)               \
                  : "Q" (tmp), "d" (addr)               \
                  : "cc");                              \
   printf("xiy %16.16lX ^ %16.16lX = %16.16lX (cc=%d)\n", s1, (unsigned long) 0x##i2, tmp, cc); \
})

#define xysweep(s2)                                     \
({                                                      \
   XOR_XY(0ul, s2);                                     \
   XOR_XY(1ul, s2);                                     \
   XOR_XY(0xfffful, s2);                                \
   XOR_XY(0x7ffful, s2);                                \
   XOR_XY(0x8000ul, s2);                                \
   XOR_XY(0xfffffffful, s2);                            \
   XOR_XY(0x80000000ul, s2);                            \
   XOR_XY(0x7ffffffful, s2);                            \
   XOR_XY(0xaaaaaaaaaaaaaaaaul, s2);                    \
   XOR_XY(0x8000000000000000ul, s2);                    \
   XOR_XY(0xfffffffffffffffful, s2);                    \
   XOR_XY(0x5555555555555555ul, s2);                    \
})

#define xiysweep(s2)                                    \
({                                                      \
   XOR_XIY(0ul, s2);                                    \
   XOR_XIY(1ul, s2);                                    \
   XOR_XIY(0xfffful, s2);                               \
   XOR_XIY(0x7ffful, s2);                               \
   XOR_XIY(0x8000ul, s2);                               \
   XOR_XIY(0xfffffffful, s2);                           \
   XOR_XIY(0x80000000ul, s2);                           \
   XOR_XIY(0x7ffffffful, s2);                           \
   XOR_XIY(0xaaaaaaaaaaaaaaaaul, s2);                   \
   XOR_XIY(0x8000000000000000ul, s2);                   \
   XOR_XIY(0xfffffffffffffffful, s2);                   \
   XOR_XIY(0x5555555555555555ul, s2);                   \
})

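/* A minimal usage sketch (hypothetical driver, not one of the real test
   sources): include this header, provide a memory operand, and run the
   sweeps for the instruction forms under test. The mnemonics "xg" and
   "xgr" are the 64-bit XOR register-with-memory and register-with-register
   instructions; "op" is an illustrative variable name.

      #include "xor.h"

      int main(void)
      {
         unsigned long op = 0x0123456789abcdefUL;

         memsweep(xg, op);      // register ^ memory   (XG)
         regsweep(xgr, op);     // register ^ register (XGR)

         return 0;
      }
*/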