# Source: llvm-project.git — llvm/test/MC/AsmParser/sse2avx-att.s
# blob a452a5c611d3a29f5ee4c9806383cb2f454ce5af
# Test the -x86-sse2avx assembler option: legacy SSE instructions are
# rewritten to their VEX-encoded (AVX, 3-operand) "v"-prefixed forms.
# Instructions with no VEX equivalent (SSE4a extrq/insertq, MMX pshufw)
# must pass through unchanged.
# RUN: llvm-mc -triple x86_64 -x86-sse2avx %s | FileCheck %s
# Round-trip: emit Intel syntax, re-parse it as Intel, apply sse2avx on
# the second pass, and verify the AT&T output against the same patterns.
# NOTE(review): the second RUN previously ended without "| FileCheck %s",
# so the round-trip output was never verified — restored; confirm against
# the upstream test.
# RUN: llvm-mc -triple=x86_64 -output-asm-variant=1 %s | llvm-mc -triple=x86_64 -x86-asm-syntax=intel -x86-sse2avx | FileCheck %s
.text
# CHECK: vmovsd -352(%rbp), %xmm0
movsd -352(%rbp), %xmm0 # xmm0 = mem[0],zero
# CHECK-NEXT: vunpcklpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
unpcklpd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0]
# CHECK-NEXT: vmovapd %xmm0, -368(%rbp)
movapd %xmm0, -368(%rbp)
# CHECK-NEXT: vmovapd -368(%rbp), %xmm0
movapd -368(%rbp), %xmm0
# CHECK-NEXT: vmovsd -376(%rbp), %xmm1
movsd -376(%rbp), %xmm1 # xmm1 = mem[0],zero
# CHECK-NEXT: vmovsd -384(%rbp), %xmm0
movsd -384(%rbp), %xmm0 # xmm0 = mem[0],zero
# CHECK-NEXT: vunpcklpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
unpcklpd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0]
# CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
addpd %xmm1, %xmm0
# CHECK-NEXT: vmovapd %xmm0, -464(%rbp)
movapd %xmm0, -464(%rbp)
# CHECK-NEXT: vmovaps -304(%rbp), %xmm1
movaps -304(%rbp), %xmm1
# CHECK-NEXT: vpandn %xmm1, %xmm0, %xmm0
pandn %xmm1, %xmm0
# CHECK-NEXT: vmovaps %xmm0, -480(%rbp)
movaps %xmm0, -480(%rbp)
# CHECK-NEXT: vmovss -220(%rbp), %xmm1
movss -220(%rbp), %xmm1 # xmm1 = mem[0],zero,zero,zero
# CHECK-NEXT: vinsertps $16, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
insertps $16, %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
# CHECK-NEXT: vmovaps %xmm0, -496(%rbp)
movaps %xmm0, -496(%rbp)
# CHECK-NEXT: vmovss -256(%rbp), %xmm0
movss -256(%rbp), %xmm0 # xmm0 = mem[0],zero,zero,zero
# CHECK-NEXT: vmovaps -192(%rbp), %xmm0
movaps -192(%rbp), %xmm0
# CHECK-NEXT: vdivss %xmm1, %xmm0, %xmm0
divss %xmm1, %xmm0
# CHECK-NEXT: vmovaps %xmm0, -192(%rbp)
movaps %xmm0, -192(%rbp)
# CHECK-NEXT: vmovd -128(%rbp), %xmm0
movd -128(%rbp), %xmm0 # xmm0 = mem[0],zero,zero,zero
# CHECK-NEXT: vpinsrd $1, %edx, %xmm0, %xmm0
pinsrd $1, %edx, %xmm0
# CHECK-NEXT: vmovaps %xmm0, -144(%rbp)
movaps %xmm0, -144(%rbp)
# CHECK-NEXT: vmovd -160(%rbp), %xmm0
movd -160(%rbp), %xmm0 # xmm0 = mem[0],zero,zero,zero
# CHECK-NEXT: vpblendw $170, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
pblendw $170, %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
# CHECK-NEXT: vmovdqa %xmm0, -576(%rbp)
movdqa %xmm0, -576(%rbp)
# CHECK-NEXT: vphsubw %xmm1, %xmm0, %xmm0
phsubw %xmm1, %xmm0
# CHECK-NEXT: vmovdqa %xmm0, -592(%rbp)
movdqa %xmm0, -592(%rbp)
# CHECK-NEXT: vmovaps -496(%rbp), %xmm0
movaps -496(%rbp), %xmm0
# CHECK-NEXT: vroundps $8, %xmm0, %xmm0
roundps $8, %xmm0, %xmm0
# CHECK-NEXT: vmovaps %xmm0, -608(%rbp)
movaps %xmm0, -608(%rbp)
# CHECK-NEXT: vmovapd -432(%rbp), %xmm0
movapd -432(%rbp), %xmm0
# CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
pxor %xmm1, %xmm0
# CHECK-NEXT: vmovaps %xmm0, -640(%rbp)
movaps %xmm0, -640(%rbp)
# CHECK-NEXT: vmovapd -32(%rbp), %xmm0
movapd -32(%rbp), %xmm0
# CHECK-NEXT: vmovupd %xmm0, (%rax)
movupd %xmm0, (%rax)
# CHECK-NEXT: vmovsd -656(%rbp), %xmm0
movsd -656(%rbp), %xmm0 # xmm0 = mem[0],zero
# SSE4a and MMX instructions below have no VEX form: they must be emitted
# unchanged (no "v" prefix expected in the CHECK patterns).
# CHECK-NEXT: extrq $16, $8, %xmm0 # xmm0 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
extrq $16, $8, %xmm0
# CHECK-NEXT: insertq $16, $8, %xmm1, %xmm0 # xmm0 = xmm0[0,1],xmm1[0],xmm0[3,4,5,6,7,u,u,u,u,u,u,u,u]
insertq $16, $8, %xmm1, %xmm0
# CHECK-NEXT: pshufw $1, %mm0, %mm2 # mm2 = mm0[1,0,0,0]
pshufw $1, %mm0, %mm2
# NOTE(review): the expected mask operand below is %xmm2 while the SSE
# input uses %xmm0 as the mask — looks inconsistent with the blendvps/pd
# cases below; verify against actual llvm-mc -x86-sse2avx output before
# changing either line.
# CHECK-NEXT: vpblendvb %xmm2, %xmm2, %xmm1, %xmm1
pblendvb %xmm0, %xmm2, %xmm1
# CHECK-NEXT: vblendvps %xmm0, %xmm0, %xmm2, %xmm2
blendvps %xmm0, %xmm0, %xmm2
# CHECK-NEXT: vblendvpd %xmm0, %xmm0, %xmm2, %xmm2
blendvpd %xmm0, %xmm0, %xmm2
# Implicit-%xmm0 two-operand legacy form maps to the same VEX encoding.
# CHECK-NEXT: vblendvpd %xmm0, %xmm0, %xmm2, %xmm2
blendvpd %xmm0, %xmm2