/* This testcase is part of GDB, the GNU debugger.

   Copyright 2023 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
/* Architecture tests for the Intel i386 platform.  */

#include <stdlib.h>
char global_buf0[] = {0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                      0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
                      0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                      0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f};
char global_buf1[] = {0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0};
char *dyn_buf0;
char *dyn_buf1;
int
vmov_test ()
{
  char buf0[] = {0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
                 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
                 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
                 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f};
  char buf1[] = {0, 0, 0, 0, 0, 0, 0, 0,
                 0, 0, 0, 0, 0, 0, 0, 0,
                 0, 0, 0, 0, 0, 0, 0, 0,
                 0, 0, 0, 0, 0, 0, 0, 0};
  /* start vmov_test.  */

  /* Operations on registers.  */
  asm volatile ("mov $0, %rcx");
  asm volatile ("mov $0xbeef, %rax");
  asm volatile ("vmovd %rax, %xmm0");
  asm volatile ("vmovd %xmm0, %rcx");
  asm volatile ("vmovq %xmm0, %xmm15");
  asm volatile ("vmovq %0, %%xmm15": : "m" (buf1));
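  /* vmovd transfers 32 bits and vmovq 64 bits; both zero the remaining
     bits of the destination register, so only the first 4 (or 8) bytes
     of each buffer matter for the loads below.  */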
  /* Operations based on local buffers.  */
  asm volatile ("vmovd %0, %%xmm0": : "m" (buf0));
  asm volatile ("vmovd %%xmm0, %0": "=m" (buf1));
  asm volatile ("vmovq %0, %%xmm0": : "m" (buf0));
  asm volatile ("vmovq %%xmm0, %0": "=m" (buf1));

  /* Operations based on global buffers.  */
  asm volatile ("vmovd %0, %%xmm0": : "m" (global_buf0));
  asm volatile ("vmovd %%xmm0, %0": "=m" (global_buf1));
  asm volatile ("vmovq %0, %%xmm0": : "m" (global_buf0));
  asm volatile ("vmovq %%xmm0, %0": "=m" (global_buf1));

  /* Operations based on dynamic buffers.  */
  asm volatile ("vmovd %0, %%xmm0": : "m" (*dyn_buf0));
  asm volatile ("vmovd %%xmm0, %0": "=m" (*dyn_buf1));
  asm volatile ("vmovq %0, %%xmm0": : "m" (*dyn_buf0));
  asm volatile ("vmovq %%xmm0, %0": "=m" (*dyn_buf1));
  /* Reset all relevant buffers.  */
  asm volatile ("vmovq %%xmm15, %0": "=m" (buf1));
  asm volatile ("vmovq %%xmm15, %0": "=m" (global_buf1));
  asm volatile ("vmovq %%xmm15, %0": "=m" (*dyn_buf1));
  /* Quick test for a different xmm register.  These instructions load
     from memory, so the buffers are inputs, not outputs.  */
  asm volatile ("vmovd %0, %%xmm15": : "m" (buf0));
  asm volatile ("vmovd %0, %%xmm15": : "m" (buf1));
  asm volatile ("vmovq %0, %%xmm15": : "m" (buf0));
  asm volatile ("vmovq %0, %%xmm15": : "m" (buf1));
  /* Test vmovdq style instructions.  */
  /* For local and global buffers, we can't guarantee they will be aligned.
     However, the aligned and unaligned versions share the same opcode byte
     and differ only in a mandatory prefix, so testing one is enough to
     validate both.  For safety, though, the dynamic buffers are forced to
     be 32-byte aligned so vmovdqa can be explicitly tested at least
     once.  */
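  /* Note: vmovdqa with a 256-bit memory operand faults unless the address
     is 32-byte aligned.  dyn_buf0 and dyn_buf1 are allocated in main with
     precise_aligned_alloc (32, ...), so the vmovdqa accesses below are
     safe.  */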
  /* Operations based on local buffers.  */
  asm volatile ("vmovdqu %0, %%ymm0": : "m" (buf0));
  asm volatile ("vmovdqu %%ymm0, %0": "=m" (buf1));

  /* Operations based on global buffers.  */
  asm volatile ("vmovdqu %0, %%ymm0": : "m" (global_buf0));
  asm volatile ("vmovdqu %%ymm0, %0": "=m" (global_buf1));

  /* Operations based on dynamic buffers.  */
  asm volatile ("vmovdqa %0, %%ymm15": : "m" (*dyn_buf0));
  asm volatile ("vmovdqa %%ymm15, %0": "=m" (*dyn_buf1));
  asm volatile ("vmovdqu %0, %%ymm0": : "m" (*dyn_buf0));
  asm volatile ("vmovdqu %%ymm0, %0": "=m" (*dyn_buf1));
  /* Operations between 2 registers.  */
  asm volatile ("vmovdqu %ymm15, %ymm0");
  asm volatile ("vmovdqu %ymm2, %ymm15");
  asm volatile ("vmovdqa %ymm15, %ymm0");
  /* Testing vmov [ss|sd] instructions.  */
  /* Note: vmovss only works with XMM registers, not YMM registers,
     according to the Intel manual.  Also, initializing the local variables
     uses xmm0 on my machine, so we can't test with it; use xmm1
     instead.  */
  /* Move single precision floats to and from memory.  */
  float f1 = 1.5, f2 = 4.2;
  asm volatile ("vmovss %0, %%xmm1" : : "m" (f1));
  asm volatile ("vmovss %0, %%xmm15": : "m" (f2));
  asm volatile ("vmovss %%xmm1, %0" : "=m" (f2));
  asm volatile ("vmovss %%xmm15, %0": "=m" (f1));

  asm volatile ("vmovss %xmm15, %xmm1, %xmm2");
  asm volatile ("vmovss %xmm15, %xmm1, %xmm8");
  asm volatile ("vmovss %xmm1, %xmm2, %xmm15");
  asm volatile ("vmovss %xmm2, %xmm15, %xmm1");
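  /* In the three-operand register form above, e.g.
     "vmovss %xmm15, %xmm1, %xmm2", the destination takes bits 31:0 from
     the first source (xmm15) and bits 127:32 from the second (xmm1),
     zeroing the upper ymm bits, so the recorder must save the whole
     destination register to undo it.  */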
  /* Testing double precision floats.  */
  double d1 = -1.5, d2 = -2.5;
  asm volatile ("vmovsd %0, %%xmm1" : : "m" (d1));
  asm volatile ("vmovsd %0, %%xmm15": : "m" (d2));
  asm volatile ("vmovsd %%xmm1, %0" : "=m" (d2));
  asm volatile ("vmovsd %%xmm15, %0": "=m" (d1));

  asm volatile ("vmovsd %xmm15, %xmm1, %xmm2");
  asm volatile ("vmovsd %xmm15, %xmm1, %xmm8");
  asm volatile ("vmovsd %xmm1, %xmm2, %xmm15");
  asm volatile ("vmovsd %xmm2, %xmm15, %xmm1");
  /* "reset" all the buffers.  This doesn't zero them all, but
     it zeroes the start, which lets us ensure the tests see
     some changes.  */
  asm volatile ("vmovq %%xmm3, %0": "=m" (buf1));
  asm volatile ("vmovq %%xmm3, %0": "=m" (global_buf1));
  asm volatile ("vmovq %%xmm3, %0": "=m" (*dyn_buf1));
  /* Testing vmovu[ps|pd] instructions.  Unlike vmovdq[u|a], the aligned
     and unaligned versions of these instructions have different opcodes,
     meaning they'll need to be tested separately.  */
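  /* For reference, from the Intel SDM: movups/movupd use opcodes 0F 10/11
     and movaps/movapd use 0F 28/29, whereas movdqu and movdqa share
     opcode 0F 6F/7F and differ only in the mandatory prefix
     (F3 vs 66).  */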
  asm volatile ("vmovups %0, %%xmm0" : : "m" (buf0));
  asm volatile ("vmovupd %0, %%ymm15" : : "m" (buf1));
  asm volatile ("vmovupd %%xmm0, %0" : "=m" (buf1));
  asm volatile ("vmovups %%ymm15, %0" : "=m" (buf1));

  asm volatile ("vmovups %0, %%xmm0" : : "m" (global_buf0));
  asm volatile ("vmovupd %0, %%ymm15" : : "m" (global_buf1));
  asm volatile ("vmovupd %%xmm0, %0" : "=m" (global_buf1));
  asm volatile ("vmovups %%ymm15, %0" : "=m" (global_buf1));

  asm volatile ("vmovups %0, %%xmm0" : : "m" (*dyn_buf0));
  asm volatile ("vmovupd %0, %%ymm15" : : "m" (*dyn_buf1));
  asm volatile ("vmovupd %%xmm0, %0" : "=m" (*dyn_buf1));
  asm volatile ("vmovups %%ymm15, %0" : "=m" (*dyn_buf1));

  asm volatile ("vmovaps %0, %%xmm0" : : "m" (*dyn_buf0));
  asm volatile ("vmovapd %0, %%ymm15" : : "m" (*dyn_buf1));
  asm volatile ("vmovapd %%xmm0, %0" : "=m" (*dyn_buf1));
  asm volatile ("vmovaps %%ymm15, %0" : "=m" (*dyn_buf1));
  /* We have a return statement to deal with
     epilogue in different compilers.  */
  return 0; /* end vmov_test */
}

/* Test if we can properly record (and undo) vpunpck style instructions.
   Most tests will use xmm0 and xmm1 as sources.  The registers xmm15 and
   xmm2 are used as destinations to ensure we're reading the VEX.R bit
   correctly.  */

int
vpunpck_test ()
{
  /* Using GDB, load these values onto registers, for ease of testing.
     ymm0.v2_int128 = {0x1f1e1d1c1b1a19181716151413121110, 0x2f2e2d2c2b2a29282726252423222120}
     ymm1.v2_int128 = {0x4f4e4d4c4b4a49484746454443424140, 0x3f3e3d3c3b3a39383736353433323130}
     ymm2.v2_int128 = {0x0, 0x0}
     ymm15.v2_int128 = {0xdead, 0xbeef}
     so it's easy to confirm that the unpacking went as expected.  */
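  /* Worked example, assuming the values above: for
     "vpunpcklbw %xmm0, %xmm1, %xmm15", the low bytes of the two sources
     are interleaved starting with xmm1, so xmm15 begins
     0x40, 0x10, 0x41, 0x11, 0x42, 0x12, ...  */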
  /* start vpunpck_test.  */

  /* First try all low bit unpack instructions with xmm registers.  */
  /* 17 27 16 26 15 25 14 24 ...  */
  asm volatile ("vpunpcklbw %xmm0, %xmm1, %xmm15");
  /* 17 16 27 26 15 14 25 24 ...  */
  asm volatile ("vpunpcklwd %0, %%xmm1, %%xmm15"
                : : "m" (global_buf0));
  /* 17 16 15 14 27 26 25 24 ...  */
  asm volatile ("vpunpckldq %0, %%xmm1, %%xmm2"
                : : "m" (global_buf0));
  /* 17 16 15 14 13 12 11 10 ...  */
  asm volatile ("vpunpcklqdq %xmm0, %xmm1, %xmm2");
  /* Then try all high bit unpack instructions with xmm registers.  */
  /* 17 27 16 26 15 25 14 24 ...  */
  asm volatile ("vpunpckhbw %xmm0, %xmm1, %xmm15");
  /* 17 16 27 26 15 14 25 24 ...  */
  asm volatile ("vpunpckhwd %0, %%xmm1, %%xmm15"
                : : "m" (global_buf0));
  /* 17 16 15 14 27 26 25 24 ...  */
  asm volatile ("vpunpckhdq %0, %%xmm1, %%xmm2"
                : : "m" (global_buf0));
  /* 17 16 15 14 13 12 11 10 ...  */
  asm volatile ("vpunpckhqdq %xmm0, %xmm1, %xmm2");
  /* Lastly, let's test a few unpack instructions with ymm registers.  */
  /* 17 27 16 26 15 25 14 24 ...  */
  asm volatile ("vpunpcklbw %ymm0, %ymm1, %ymm15");
  /* 17 16 27 26 15 14 25 24 ...  */
  asm volatile ("vpunpcklwd %ymm0, %ymm1, %ymm15");
  /* 17 16 15 14 27 26 25 24 ...  */
  asm volatile ("vpunpckhdq %ymm0, %ymm1, %ymm15");
  /* 17 16 15 14 13 12 11 10 ...  */
  asm volatile ("vpunpckhqdq %ymm0, %ymm1, %ymm15");
  /* Test some of the floating point unpack instructions.  */
  /* 17 27 16 26 15 25 14 24 ...  */
  asm volatile ("vunpcklps %xmm0, %xmm1, %xmm15");
  /* 17 16 27 26 15 14 25 24 ...  */
  asm volatile ("vunpcklps %ymm0, %ymm1, %ymm2");
  /* 17 16 15 14 27 26 25 24 ...  */
  asm volatile ("vunpcklpd %xmm0, %xmm1, %xmm2");
  /* 17 16 15 14 13 12 11 10 ...  */
  asm volatile ("vunpcklpd %ymm0, %ymm1, %ymm15");
  /* 17 27 16 26 15 25 14 24 ...  */
  asm volatile ("vunpckhps %xmm0, %xmm1, %xmm15");
  /* 17 16 27 26 15 14 25 24 ...  */
  asm volatile ("vunpckhps %ymm0, %ymm1, %ymm2");
  /* 17 16 15 14 27 26 25 24 ...  */
  asm volatile ("vunpckhpd %xmm0, %xmm1, %xmm2");
  /* 17 16 15 14 13 12 11 10 ...  */
  asm volatile ("vunpckhpd %ymm0, %ymm1, %ymm15");
  /* We have a return statement to deal with
     epilogue in different compilers.  */
  return 0; /* end vpunpck_test */
}

/* Test if we can record vpbroadcast instructions.  */

int
vpbroadcast_test ()
{
  /* Using GDB, load these values onto the registers, for ease of testing.
     xmm0.uint128 = 0x0
     xmm1.uint128 = 0x1f1e1d1c1b1a19181716151413121110
     xmm15.uint128 = 0x0
     this way it's easy to confirm we're undoing things correctly.  */
  /* start vpbroadcast_test.  */
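  /* Worked example, assuming the values above: vpbroadcastb copies byte 0
     of the source to every byte of the destination, so after
     "vpbroadcastb %xmm1, %xmm0" each byte of xmm0 holds 0x10.  The w, d
     and q variants broadcast the low word, dword and qword instead.  */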
  asm volatile ("vpbroadcastb %xmm1, %xmm0");
  asm volatile ("vpbroadcastb %xmm1, %xmm15");

  asm volatile ("vpbroadcastw %xmm1, %ymm0");
  asm volatile ("vpbroadcastw %xmm1, %ymm15");

  asm volatile ("vpbroadcastd %xmm1, %xmm0");
  asm volatile ("vpbroadcastd %xmm1, %xmm15");

  asm volatile ("vpbroadcastq %xmm1, %ymm0");
  asm volatile ("vpbroadcastq %xmm1, %ymm15");
  /* We have a return statement to deal with
     epilogue in different compilers.  */
  return 0; /* end vpbroadcast_test */
}

int
vzeroupper_test ()
{
  /* start vzeroupper_test.  */
  /* Using GDB, load these values onto the registers, for ease of testing.
     ymm0.v2_int128 = {0x0, 0x12345}
     ymm1.v2_int128 = {0x1f1e1d1c1b1a1918, 0x0}
     ymm2.v2_int128 = {0x0, 0xbeef}
     ymm15.v2_int128 = {0x0, 0xcafeface}
     this way it's easy to confirm we're undoing things correctly.  */
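  /* vzeroupper zeroes bits 255:128 of every ymm register.  Assuming the
     values above, it wipes the upper halves of ymm0, ymm2 and ymm15
     (0x12345, 0xbeef and 0xcafeface) while leaving ymm1's low half
     untouched, so undoing it must restore all of those upper halves.  */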
  asm volatile ("vzeroupper");

  /* We have a return statement to deal with
     epilogue in different compilers.  */
  return 0; /* end vzeroupper_test */
}

int
vpor_xor_test ()
{
  /* start vpor_xor_test.  */
  /* Using GDB, load these values onto the registers, for ease of testing.
     ymm0.v2_int128 = {0x0, 0x12345}
     ymm1.v2_int128 = {0x1f1e1d1c1b1a1918, 0x0}
     ymm2.v2_int128 = {0x0, 0xbeef}
     ymm15.v2_int128 = {0x0, 0xcafeface}
     this way it's easy to confirm we're undoing things correctly.  */
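  /* "vpxor %ymm0, %ymm0, %ymm0" below is the usual idiom for zeroing a
     register; even though the result is independent of the old contents,
     the recorder still has to save ymm0 so the instruction can be undone.
     The xmm forms also zero bits 255:128 of the destination.  */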
  asm volatile ("vpxor %ymm0, %ymm0, %ymm0");
  asm volatile ("vpxor %xmm0, %xmm1, %xmm0");
  asm volatile ("vpxor %ymm2, %ymm15, %ymm1");
  asm volatile ("vpxor %xmm2, %xmm15, %xmm2");
  asm volatile ("vpxor %ymm2, %ymm1, %ymm15");

  asm volatile ("vpor %ymm0, %ymm0, %ymm0");
  asm volatile ("vpor %xmm0, %xmm1, %xmm0");
  asm volatile ("vpor %ymm2, %ymm15, %ymm1");
  asm volatile ("vpor %xmm2, %xmm15, %xmm2");
  asm volatile ("vpor %ymm2, %ymm1, %ymm15");
  return 0; /* end vpor_xor_test */
}

int
vpcmpeq_test ()
{
  /* start vpcmpeq_test.  */
  /* Using GDB, load these values onto registers for testing.
     ymm0.v2_int128 = {0x0, 0x12345}
     ymm1.v8_int32 = {0xcafe, 0xbeef, 0xff, 0x1234, 0x0, 0xff00, 0xff0000ff, 0xface0f0f}
     ymm2.v8_int32 = {0xcafe0, 0xbeef, 0xff00, 0x12345678, 0x90abcdef, 0xffff00, 0xff, 0xf}
     ymm15.v2_int128 = {0xcafeface, 0xcafeface}
     this way it's easy to confirm we're undoing things correctly.  */
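  /* Worked example, assuming the values above: vpcmpeqd sets each dword
     of the destination to all-ones where the sources match and to zero
     elsewhere.  Only element 1 (0xbeef) of ymm1 and ymm2 is equal, so
     "vpcmpeqd %xmm1, %xmm2, %xmm0" leaves
     xmm0.v4_int32 = {0x0, 0xffffffff, 0x0, 0x0}.  */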
  /* Test all the vpcmpeq variants on a low register (number 0).  */
  asm volatile ("vpcmpeqb %xmm1, %xmm2, %xmm0");
  asm volatile ("vpcmpeqw %xmm1, %xmm2, %xmm0");
  asm volatile ("vpcmpeqd %xmm1, %xmm2, %xmm0");

  asm volatile ("vpcmpeqb %ymm1, %ymm2, %ymm0");
  asm volatile ("vpcmpeqw %ymm1, %ymm2, %ymm0");
  asm volatile ("vpcmpeqd %ymm1, %ymm2, %ymm0");

  /* Test all the vpcmpeq variants on a high register (number 15).  */
  asm volatile ("vpcmpeqb %xmm1, %xmm2, %xmm15");
  asm volatile ("vpcmpeqw %xmm1, %xmm2, %xmm15");
  asm volatile ("vpcmpeqd %xmm1, %xmm2, %xmm15");

  asm volatile ("vpcmpeqb %ymm1, %ymm2, %ymm15");
  asm volatile ("vpcmpeqw %ymm1, %ymm2, %ymm15");
  asm volatile ("vpcmpeqd %ymm1, %ymm2, %ymm15");
  return 0; /* end vpcmpeq_test */
}

int
vpmovmskb_test ()
{
  /* start vpmovmskb_test.  */
  /* Using GDB, load these values onto registers for testing.
     rbx = 2
     r8 = 3
     r9 = 4
     this way it's easy to confirm we're undoing things correctly.  */
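  /* vpmovmskb gathers the most significant bit of each byte of the ymm
     source into a 32-bit mask and zero-extends it into the destination
     GPR, so each instruction below clobbers the full 64-bit register.  */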
  asm volatile ("vpmovmskb %ymm0, %eax");
  asm volatile ("vpmovmskb %ymm0, %ebx");

  asm volatile ("vpmovmskb %ymm0, %r8");
  asm volatile ("vpmovmskb %ymm0, %r9");
  return 0; /* end vpmovmskb_test */
}

/* Test recording arithmetic instructions.  */

int
arith_test ()
{
  /* start arith_test.  */
  /* Using GDB, load these values onto registers for testing.
     ymm0.v8_float = {0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5}
     ymm1.v8_float = {0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5}
     ymm15.v2_int128 = {0x0, 0x0}
     this way it's easy to confirm we're undoing things correctly.  */
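  /* Worked example, assuming the values above: with identical sources,
     "vaddps %xmm0, %xmm1, %xmm15" yields
     xmm15.v4_float = {1.0, 3.0, 5.0, 7.0}.  The scalar forms (vaddss,
     vaddsd) only compute the low element and copy bits 127:32 (or 127:64)
     from the second source.  */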
  asm volatile ("vaddps %xmm0, %xmm1, %xmm15");
  asm volatile ("vaddps %ymm0, %ymm1, %ymm15");
  asm volatile ("vaddpd %xmm0, %xmm1, %xmm15");
  asm volatile ("vaddpd %ymm0, %ymm1, %ymm15");
  asm volatile ("vaddss %xmm0, %xmm1, %xmm15");
  asm volatile ("vaddsd %xmm0, %xmm1, %xmm15");

  asm volatile ("vmulps %xmm0, %xmm1, %xmm15");
  asm volatile ("vmulps %ymm0, %ymm1, %ymm15");
  asm volatile ("vmulpd %xmm0, %xmm1, %xmm15");
  asm volatile ("vmulpd %ymm0, %ymm1, %ymm15");
  asm volatile ("vmulss %xmm0, %xmm1, %xmm15");
  asm volatile ("vmulsd %xmm0, %xmm1, %xmm15");

  asm volatile ("vsubps %xmm0, %xmm1, %xmm15");
  asm volatile ("vsubps %ymm0, %ymm1, %ymm15");
  asm volatile ("vsubpd %xmm0, %xmm1, %xmm15");
  asm volatile ("vsubpd %ymm0, %ymm1, %ymm15");
  asm volatile ("vsubss %xmm0, %xmm1, %xmm15");
  asm volatile ("vsubsd %xmm0, %xmm1, %xmm15");

  asm volatile ("vdivps %xmm0, %xmm1, %xmm15");
  asm volatile ("vdivps %ymm0, %ymm1, %ymm15");
  asm volatile ("vdivpd %xmm0, %xmm1, %xmm15");
  asm volatile ("vdivpd %ymm0, %ymm1, %ymm15");
  asm volatile ("vdivss %xmm0, %xmm1, %xmm15");
  asm volatile ("vdivsd %xmm0, %xmm1, %xmm15");

  asm volatile ("vminps %xmm0, %xmm1, %xmm15");
  asm volatile ("vminps %ymm0, %ymm1, %ymm15");
  asm volatile ("vminpd %xmm0, %xmm1, %xmm15");
  asm volatile ("vminpd %ymm0, %ymm1, %ymm15");
  asm volatile ("vminss %xmm0, %xmm1, %xmm15");
  asm volatile ("vminsd %xmm0, %xmm1, %xmm15");

  asm volatile ("vmaxps %xmm0, %xmm1, %xmm15");
  asm volatile ("vmaxps %ymm0, %ymm1, %ymm15");
  asm volatile ("vmaxpd %xmm0, %xmm1, %xmm15");
  asm volatile ("vmaxpd %ymm0, %ymm1, %ymm15");
  asm volatile ("vmaxss %xmm0, %xmm1, %xmm15");
  asm volatile ("vmaxsd %xmm0, %xmm1, %xmm15");

  return 0; /* end arith_test */
}

/* This include is used to allocate the dynamic buffers and have
   the pointers aligned to a 32-byte boundary, so we can test instructions
   that require aligned memory.  */
#include "precise-aligned-alloc.c"
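/* Judging from the calls in main below, precise_aligned_alloc takes
   (alignment, size, ...) and returns a pointer whose address is a multiple
   of the requested alignment; that 32-byte alignment is what the
   vmovdqa/vmovaps tests above rely on.  */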
int
main ()
{
  dyn_buf0 = (char *) precise_aligned_alloc (32, sizeof (char) * 32, NULL);
  dyn_buf1 = (char *) precise_aligned_alloc (32, sizeof (char) * 32, NULL);
  for (int i = 0; i < 32; i++)
    {
      dyn_buf0[i] = 0x20 + (i % 16);
      dyn_buf1[i] = 0;
    }
  /* Zero relevant xmm registers, so we know what to look for.  */
  asm volatile ("vmovq %0, %%xmm0": : "m" (global_buf1));
  asm volatile ("vmovq %0, %%xmm1": : "m" (global_buf1));
  asm volatile ("vmovq %0, %%xmm2": : "m" (global_buf1));
  asm volatile ("vmovq %0, %%xmm3": : "m" (global_buf1));
  asm volatile ("vmovq %0, %%xmm15": : "m" (global_buf1));
  vmov_test ();
  vpunpck_test ();
  vpbroadcast_test ();
  vzeroupper_test ();
  vpor_xor_test ();
  vpcmpeq_test ();
  vpmovmskb_test ();
  arith_test ();
  return 0; /* end of main */
}