; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX,AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefixes=AVX,AVX512

; AVX1 has support for 256-bit bitwise logic because the FP variants were included.
; If using those ops requires extra insert/extract though, it's probably not worth it.

define <8 x i32> @PR32790(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; SSE-LABEL: PR32790:
; SSE:       # %bb.0:
; SSE-NEXT:    paddd %xmm2, %xmm0
; SSE-NEXT:    paddd %xmm3, %xmm1
; SSE-NEXT:    pand %xmm5, %xmm1
; SSE-NEXT:    pand %xmm4, %xmm0
; SSE-NEXT:    psubd %xmm6, %xmm0
; SSE-NEXT:    psubd %xmm7, %xmm1
; SSE-NEXT:    retq
;
; AVX1-LABEL: PR32790:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm1
; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm1
; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm1
; AVX1-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: PR32790:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpsubd %ymm3, %ymm0, %ymm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: PR32790:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT:    vpand %ymm2, %ymm0, %ymm0
; AVX512-NEXT:    vpsubd %ymm3, %ymm0, %ymm0
; AVX512-NEXT:    retq
  %add = add <8 x i32> %a, %b
  %and = and <8 x i32> %add, %c
  %sub = sub <8 x i32> %and, %d
  ret <8 x i32> %sub
}

; In a more extreme case, even the later AVX targets should avoid extract/insert just
; because 256-bit ops are supported.

define <4 x i32> @do_not_use_256bit_op(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; SSE-LABEL: do_not_use_256bit_op:
; SSE:       # %bb.0:
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    pand %xmm3, %xmm1
; SSE-NEXT:    psubd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: do_not_use_256bit_op:
; AVX:       # %bb.0:
; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpand %xmm3, %xmm1, %xmm1
; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %concat1 = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %concat2 = shufflevector <4 x i32> %c, <4 x i32> %d, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %and = and <8 x i32> %concat1, %concat2
  %extract1 = shufflevector <8 x i32> %and, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %extract2 = shufflevector <8 x i32> %and, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %sub = sub <4 x i32> %extract1, %extract2
  ret <4 x i32> %sub
}

; When extracting from a vector binop, the source width should be a multiple of the destination width.
; https://bugs.llvm.org/show_bug.cgi?id=39511

define <3 x float> @PR39511(<4 x float> %t0, ptr %b) {
; SSE-LABEL: PR39511:
; SSE:       # %bb.0:
; SSE-NEXT:    addps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: PR39511:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %add = fadd <4 x float> %t0, <float 1.0, float 2.0, float 3.0, float 4.0>
  %ext = shufflevector <4 x float> %add, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
  ret <3 x float> %ext
}

; When extracting from a vector binop, we need to be extracting
; by a width of at least 1 of the original vector elements.
; https://bugs.llvm.org/show_bug.cgi?id=39893

define <2 x i8> @PR39893(<2 x i32> %x, <8 x i8> %y) {
; SSE-LABEL: PR39893:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    psubd %xmm0, %xmm2
; SSE-NEXT:    psrld $16, %xmm2
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE-NEXT:    movdqa %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: PR39893:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpsubd %xmm0, %xmm2, %xmm0
; AVX-NEXT:    vpsrld $16, %xmm0, %xmm0
; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
; AVX-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX-NEXT:    retq
  %sub = sub <2 x i32> <i32 0, i32 undef>, %x
  %bc = bitcast <2 x i32> %sub to <8 x i8>
  %shuffle = shufflevector <8 x i8> %y, <8 x i8> %bc, <2 x i32> <i32 10, i32 4>
  ret <2 x i8> %shuffle
}
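; Same idea with an FP source: the extracted <2 x i8> is still narrower than a single f32 element of the negated vector.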
define <2 x i8> @PR39893_2(<2 x float> %x) {
; SSE-LABEL: PR39893_2:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    subps %xmm0, %xmm1
; SSE-NEXT:    movaps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: PR39893_2:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vsubps %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %fsub = fsub <2 x float> zeroinitializer, %x
  %bc = bitcast <2 x float> %fsub to <8 x i8>
  %shuffle = shufflevector <8 x i8> %bc, <8 x i8> undef, <2 x i32> <i32 0, i32 1>
  ret <2 x i8> %shuffle
}
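; Only one element of the <4 x double> result below is actually used, so the 256-bit
; fmul/fadd created by the shuffles should narrow to 128-bit ops (xmm-only checks).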
define <4 x double> @fmul_v2f64(<2 x double> %x, <2 x double> %y) {
; SSE-LABEL: fmul_v2f64:
; SSE:       # %bb.0:
; SSE-NEXT:    movapd %xmm1, %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; SSE-NEXT:    mulpd %xmm2, %xmm2
; SSE-NEXT:    mulpd %xmm1, %xmm1
; SSE-NEXT:    addpd %xmm1, %xmm2
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT:    movapd %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: fmul_v2f64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm1[0],xmm0[0]
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX1-NEXT:    vmulpd %xmm0, %xmm0, %xmm0
; AVX1-NEXT:    vmulpd %xmm2, %xmm2, %xmm1
; AVX1-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: fmul_v2f64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm1[0],xmm0[0]
; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX2-NEXT:    vmulpd %xmm0, %xmm0, %xmm0
; AVX2-NEXT:    vmulpd %xmm2, %xmm2, %xmm1
; AVX2-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT:    retq
;
; AVX512-LABEL: fmul_v2f64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm1[1]
; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512-NEXT:    vmulpd %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm2 * xmm2) + xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT:    retq
  %s = shufflevector <2 x double> %x, <2 x double> %y, <4 x i32> <i32 2, i32 0, i32 1, i32 3>
  %bo = fmul fast <4 x double> %s, %s
  %ext = shufflevector <4 x double> %bo, <4 x double> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  %add = fadd fast <4 x double> %bo, %ext
  %rdx = shufflevector <4 x double> %add, <4 x double> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  ret <4 x double> %rdx
}