; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=AVX1-SLOW
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 -mattr=+avx,+fast-hops | FileCheck %s --check-prefix=AVX1-FAST
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=AVX512
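
;
; vXf32 - reductions accumulating into the scalar argument %a0
;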
define float @test_v2f32(float %a0, <2 x float> %a1) {
; SSE2-LABEL: test_v2f32:
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
; SSE2-NEXT: addss %xmm1, %xmm2
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE41-LABEL: test_v2f32:
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm2
; SSE41-NEXT: addss %xmm2, %xmm0
; AVX1-SLOW-LABEL: test_v2f32:
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v2f32:
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v2f32:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-LABEL: test_v2f32:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
  %1 = call fast float @llvm.vector.reduce.fadd.f32.v2f32(float %a0, <2 x float> %a1)
  ret float %1
}

define float @test_v4f32(float %a0, <4 x float> %a1) {
; SSE2-LABEL: test_v4f32:
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT: addps %xmm1, %xmm2
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[1,1]
; SSE2-NEXT: addss %xmm2, %xmm1
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE41-LABEL: test_v4f32:
; SSE41-NEXT: movaps %xmm1, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT: addps %xmm1, %xmm2
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm1
; SSE41-NEXT: addss %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v4f32:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v4f32:
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v4f32:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-LABEL: test_v4f32:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
  %1 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float %a0, <4 x float> %a1)
  ret float %1
}

define float @test_v8f32(float %a0, <8 x float> %a1) {
; SSE2-LABEL: test_v8f32:
; SSE2-NEXT: addps %xmm2, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT: addps %xmm1, %xmm2
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[1,1]
; SSE2-NEXT: addss %xmm2, %xmm1
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE41-LABEL: test_v8f32:
; SSE41-NEXT: addps %xmm2, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT: addps %xmm1, %xmm2
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm1
; SSE41-NEXT: addss %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v8f32:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v8f32:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm2, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v8f32:
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: test_v8f32:
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call fast float @llvm.vector.reduce.fadd.f32.v8f32(float %a0, <8 x float> %a1)
  ret float %1
}

define float @test_v16f32(float %a0, <16 x float> %a1) {
; SSE2-LABEL: test_v16f32:
; SSE2-NEXT: addps %xmm4, %xmm2
; SSE2-NEXT: addps %xmm3, %xmm1
; SSE2-NEXT: addps %xmm2, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT: addps %xmm1, %xmm2
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[1,1]
; SSE2-NEXT: addss %xmm2, %xmm1
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE41-LABEL: test_v16f32:
; SSE41-NEXT: addps %xmm4, %xmm2
; SSE41-NEXT: addps %xmm3, %xmm1
; SSE41-NEXT: addps %xmm2, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT: addps %xmm1, %xmm2
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm1
; SSE41-NEXT: addss %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v16f32:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v16f32:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-FAST-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-FAST-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v16f32:
; AVX2-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: test_v16f32:
; AVX512-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; AVX512-NEXT: vaddps %zmm2, %zmm1, %zmm1
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call fast float @llvm.vector.reduce.fadd.f32.v16f32(float %a0, <16 x float> %a1)
  ret float %1
}
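
;
; vXf32 - reductions with a 0.0 starting value
;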
define float @test_v2f32_zero(<2 x float> %a0) {
; SSE2-LABEL: test_v2f32_zero:
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE2-NEXT: addss %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE41-LABEL: test_v2f32_zero:
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v2f32_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v2f32_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v2f32_zero:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-LABEL: test_v2f32_zero:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
  %1 = call fast float @llvm.vector.reduce.fadd.f32.v2f32(float 0.0, <2 x float> %a0)
  ret float %1
}

define float @test_v4f32_zero(<4 x float> %a0) {
; SSE2-LABEL: test_v4f32_zero:
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE41-LABEL: test_v4f32_zero:
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT: addps %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v4f32_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v4f32_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v4f32_zero:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-LABEL: test_v4f32_zero:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
  %1 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float 0.0, <4 x float> %a0)
  ret float %1
}

define float @test_v8f32_zero(<8 x float> %a0) {
; SSE2-LABEL: test_v8f32_zero:
; SSE2-NEXT: addps %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE41-LABEL: test_v8f32_zero:
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT: addps %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v8f32_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v8f32_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v8f32_zero:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: test_v8f32_zero:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call fast float @llvm.vector.reduce.fadd.f32.v8f32(float 0.0, <8 x float> %a0)
  ret float %1
}

define float @test_v16f32_zero(<16 x float> %a0) {
; SSE2-LABEL: test_v16f32_zero:
; SSE2-NEXT: addps %xmm3, %xmm1
; SSE2-NEXT: addps %xmm2, %xmm0
; SSE2-NEXT: addps %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE41-LABEL: test_v16f32_zero:
; SSE41-NEXT: addps %xmm3, %xmm1
; SSE41-NEXT: addps %xmm2, %xmm0
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT: addps %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v16f32_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v16f32_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-FAST-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v16f32_zero:
; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: test_v16f32_zero:
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call fast float @llvm.vector.reduce.fadd.f32.v16f32(float 0.0, <16 x float> %a0)
  ret float %1
}
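
;
; vXf32 - "undef" variants (0.0 starting value in the IR below)
;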
define float @test_v2f32_undef(<2 x float> %a0) {
; SSE2-LABEL: test_v2f32_undef:
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE2-NEXT: addss %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE41-LABEL: test_v2f32_undef:
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v2f32_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v2f32_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v2f32_undef:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-LABEL: test_v2f32_undef:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
  %1 = call fast float @llvm.vector.reduce.fadd.f32.v2f32(float 0.0, <2 x float> %a0)
  ret float %1
}

define float @test_v4f32_undef(<4 x float> %a0) {
; SSE2-LABEL: test_v4f32_undef:
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE41-LABEL: test_v4f32_undef:
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT: addps %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v4f32_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v4f32_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v4f32_undef:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-LABEL: test_v4f32_undef:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
  %1 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float 0.0, <4 x float> %a0)
  ret float %1
}

define float @test_v8f32_undef(<8 x float> %a0) {
; SSE2-LABEL: test_v8f32_undef:
; SSE2-NEXT: addps %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE41-LABEL: test_v8f32_undef:
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT: addps %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v8f32_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v8f32_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v8f32_undef:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: test_v8f32_undef:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call fast float @llvm.vector.reduce.fadd.f32.v8f32(float 0.0, <8 x float> %a0)
  ret float %1
}

define float @test_v16f32_undef(<16 x float> %a0) {
; SSE2-LABEL: test_v16f32_undef:
; SSE2-NEXT: addps %xmm3, %xmm1
; SSE2-NEXT: addps %xmm2, %xmm0
; SSE2-NEXT: addps %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE41-LABEL: test_v16f32_undef:
; SSE41-NEXT: addps %xmm3, %xmm1
; SSE41-NEXT: addps %xmm2, %xmm0
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT: addps %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v16f32_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v16f32_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-FAST-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v16f32_undef:
; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: test_v16f32_undef:
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call fast float @llvm.vector.reduce.fadd.f32.v16f32(float 0.0, <16 x float> %a0)
  ret float %1
}
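
;
; vXf64 - reductions accumulating into the scalar argument %a0
;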
define double @test_v2f64(double %a0, <2 x double> %a1) {
; SSE-LABEL: test_v2f64:
; SSE-NEXT: movapd %xmm1, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT: addsd %xmm1, %xmm2
; SSE-NEXT: addsd %xmm2, %xmm0
; AVX1-SLOW-LABEL: test_v2f64:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v2f64:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddpd %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v2f64:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-LABEL: test_v2f64:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
  %1 = call fast double @llvm.vector.reduce.fadd.f64.v2f64(double %a0, <2 x double> %a1)
  ret double %1
}

define double @test_v4f64(double %a0, <4 x double> %a1) {
; SSE-LABEL: test_v4f64:
; SSE-NEXT: addpd %xmm2, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT: addsd %xmm1, %xmm2
; SSE-NEXT: addsd %xmm2, %xmm0
; AVX1-SLOW-LABEL: test_v4f64:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-SLOW-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v4f64:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-FAST-NEXT: vhaddpd %xmm1, %xmm2, %xmm1
; AVX1-FAST-NEXT: vhaddpd %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v4f64:
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: test_v4f64:
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call fast double @llvm.vector.reduce.fadd.f64.v4f64(double %a0, <4 x double> %a1)
  ret double %1
}

define double @test_v8f64(double %a0, <8 x double> %a1) {
; SSE-LABEL: test_v8f64:
; SSE-NEXT: addpd %xmm4, %xmm2
; SSE-NEXT: addpd %xmm3, %xmm1
; SSE-NEXT: addpd %xmm2, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT: addsd %xmm1, %xmm2
; SSE-NEXT: addsd %xmm2, %xmm0
; AVX1-SLOW-LABEL: test_v8f64:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-SLOW-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v8f64:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-FAST-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vhaddpd %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v8f64:
; AVX2-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: test_v8f64:
; AVX512-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; AVX512-NEXT: vaddpd %zmm2, %zmm1, %zmm1
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call fast double @llvm.vector.reduce.fadd.f64.v8f64(double %a0, <8 x double> %a1)
  ret double %1
}

define double @test_v16f64(double %a0, <16 x double> %a1) {
; SSE-LABEL: test_v16f64:
; SSE-NEXT: addpd %xmm6, %xmm2
; SSE-NEXT: addpd %xmm7, %xmm3
; SSE-NEXT: addpd %xmm5, %xmm1
; SSE-NEXT: addpd %xmm3, %xmm1
; SSE-NEXT: addpd {{[0-9]+}}(%rsp), %xmm4
; SSE-NEXT: addpd %xmm2, %xmm4
; SSE-NEXT: addpd %xmm1, %xmm4
; SSE-NEXT: movapd %xmm4, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
; SSE-NEXT: addsd %xmm4, %xmm1
; SSE-NEXT: addsd %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v16f64:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddpd %ymm4, %ymm2, %ymm2
; AVX1-SLOW-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX1-SLOW-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-SLOW-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v16f64:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddpd %ymm4, %ymm2, %ymm2
; AVX1-FAST-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX1-FAST-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-FAST-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vhaddpd %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v16f64:
; AVX2-NEXT: vaddpd %ymm4, %ymm2, %ymm2
; AVX2-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: test_v16f64:
; AVX512-NEXT: vaddpd %zmm2, %zmm1, %zmm1
; AVX512-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; AVX512-NEXT: vaddpd %zmm2, %zmm1, %zmm1
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call fast double @llvm.vector.reduce.fadd.f64.v16f64(double %a0, <16 x double> %a1)
  ret double %1
}
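
;
; vXf64 - reductions with a 0.0 starting value
;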
define double @test_v2f64_zero(<2 x double> %a0) {
; SSE-LABEL: test_v2f64_zero:
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v2f64_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v2f64_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v2f64_zero:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-LABEL: test_v2f64_zero:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
  %1 = call fast double @llvm.vector.reduce.fadd.f64.v2f64(double 0.0, <2 x double> %a0)
  ret double %1
}

define double @test_v4f64_zero(<4 x double> %a0) {
; SSE-LABEL: test_v4f64_zero:
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v4f64_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v4f64_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v4f64_zero:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: test_v4f64_zero:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call fast double @llvm.vector.reduce.fadd.f64.v4f64(double 0.0, <4 x double> %a0)
  ret double %1
}

define double @test_v8f64_zero(<8 x double> %a0) {
; SSE-LABEL: test_v8f64_zero:
; SSE-NEXT: addpd %xmm3, %xmm1
; SSE-NEXT: addpd %xmm2, %xmm0
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v8f64_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v8f64_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v8f64_zero:
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: test_v8f64_zero:
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call fast double @llvm.vector.reduce.fadd.f64.v8f64(double 0.0, <8 x double> %a0)
  ret double %1
}

define double @test_v16f64_zero(<16 x double> %a0) {
; SSE-LABEL: test_v16f64_zero:
; SSE-NEXT: addpd %xmm6, %xmm2
; SSE-NEXT: addpd %xmm4, %xmm0
; SSE-NEXT: addpd %xmm2, %xmm0
; SSE-NEXT: addpd %xmm7, %xmm3
; SSE-NEXT: addpd %xmm5, %xmm1
; SSE-NEXT: addpd %xmm3, %xmm1
; SSE-NEXT: addpd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE-NEXT: addsd %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v16f64_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX1-SLOW-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v16f64_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX1-FAST-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX1-FAST-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v16f64_zero:
; AVX2-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: test_v16f64_zero:
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call fast double @llvm.vector.reduce.fadd.f64.v16f64(double 0.0, <16 x double> %a0)
  ret double %1
}
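
;
; vXf64 - "undef" variants (0.0 starting value in the IR below)
;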
define double @test_v2f64_undef(<2 x double> %a0) {
; SSE-LABEL: test_v2f64_undef:
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v2f64_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v2f64_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v2f64_undef:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-LABEL: test_v2f64_undef:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
  %1 = call fast double @llvm.vector.reduce.fadd.f64.v2f64(double 0.0, <2 x double> %a0)
  ret double %1
}

define double @test_v4f64_undef(<4 x double> %a0) {
; SSE-LABEL: test_v4f64_undef:
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v4f64_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v4f64_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v4f64_undef:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: test_v4f64_undef:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call fast double @llvm.vector.reduce.fadd.f64.v4f64(double 0.0, <4 x double> %a0)
  ret double %1
}

define double @test_v8f64_undef(<8 x double> %a0) {
; SSE-LABEL: test_v8f64_undef:
; SSE-NEXT: addpd %xmm3, %xmm1
; SSE-NEXT: addpd %xmm2, %xmm0
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v8f64_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v8f64_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v8f64_undef:
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: test_v8f64_undef:
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call fast double @llvm.vector.reduce.fadd.f64.v8f64(double 0.0, <8 x double> %a0)
  ret double %1
}

define double @test_v16f64_undef(<16 x double> %a0) {
; SSE-LABEL: test_v16f64_undef:
; SSE-NEXT: addpd %xmm6, %xmm2
; SSE-NEXT: addpd %xmm4, %xmm0
; SSE-NEXT: addpd %xmm2, %xmm0
; SSE-NEXT: addpd %xmm7, %xmm3
; SSE-NEXT: addpd %xmm5, %xmm1
; SSE-NEXT: addpd %xmm3, %xmm1
; SSE-NEXT: addpd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE-NEXT: addsd %xmm1, %xmm0
; AVX1-SLOW-LABEL: test_v16f64_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX1-SLOW-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
; AVX1-FAST-LABEL: test_v16f64_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX1-FAST-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX1-FAST-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
; AVX2-LABEL: test_v16f64_undef:
; AVX2-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: test_v16f64_undef:
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call fast double @llvm.vector.reduce.fadd.f64.v16f64(double 0.0, <16 x double> %a0)
  ret double %1
}

declare float @llvm.vector.reduce.fadd.f32.v2f32(float, <2 x float>)
declare float @llvm.vector.reduce.fadd.f32.v4f32(float, <4 x float>)
declare float @llvm.vector.reduce.fadd.f32.v8f32(float, <8 x float>)
declare float @llvm.vector.reduce.fadd.f32.v16f32(float, <16 x float>)

declare double @llvm.vector.reduce.fadd.f64.v2f64(double, <2 x double>)
declare double @llvm.vector.reduce.fadd.f64.v4f64(double, <4 x double>)
declare double @llvm.vector.reduce.fadd.f64.v8f64(double, <8 x double>)
declare double @llvm.vector.reduce.fadd.f64.v16f64(double, <16 x double>)