; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fast-hops | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
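
;
; vXf32 (accum)
;
; These tests exercise the ordered (strict) form of the fadd reduction: the
; calls below carry no 'reassoc' fast-math flag, so every lane must be added
; into the accumulator sequentially, and codegen emits a chain of scalar
; addss instructions rather than horizontal adds or a shuffle tree.
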
define float @test_v2f32(float %a0, <2 x float> %a1) {
; SSE2-LABEL: test_v2f32:
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE41-LABEL: test_v2f32:
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; AVX-LABEL: test_v2f32:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-LABEL: test_v2f32:
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
  %1 = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v2f32(float %a0, <2 x float> %a1)
  ret float %1
}

define float @test_v4f32(float %a0, <4 x float> %a1) {
; SSE2-LABEL: test_v4f32:
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE41-LABEL: test_v4f32:
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: movaps %xmm1, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; AVX-LABEL: test_v4f32:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-LABEL: test_v4f32:
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
  %1 = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float %a0, <4 x float> %a1)
  ret float %1
}

define float @test_v8f32(float %a0, <8 x float> %a1) {
; SSE2-LABEL: test_v8f32:
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm1[2,3]
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm3
; SSE2-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE41-LABEL: test_v8f32:
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: movaps %xmm1, %xmm3
; SSE41-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm2, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; AVX-LABEL: test_v8f32:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX512-LABEL: test_v8f32:
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v8f32(float %a0, <8 x float> %a1)
  ret float %1
}

define float @test_v16f32(float %a0, <16 x float> %a1) {
; SSE2-LABEL: test_v16f32:
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm5
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm1[2,3]
; SSE2-NEXT: addss %xmm5, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm5
; SSE2-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm1[1]
; SSE2-NEXT: addss %xmm5, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: addss %xmm4, %xmm0
; SSE2-NEXT: movaps %xmm4, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm4, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1,2,3]
; SSE2-NEXT: addss %xmm4, %xmm0
; SSE41-LABEL: test_v16f32:
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm5, %xmm0
; SSE41-NEXT: movaps %xmm1, %xmm5
; SSE41-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm1[1]
; SSE41-NEXT: addss %xmm5, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm2, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm3, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: addss %xmm4, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm4[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm4, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1,2,3]
; SSE41-NEXT: addss %xmm4, %xmm0
; AVX-LABEL: test_v16f32:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm3 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm2[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX512-LABEL: test_v16f32:
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $2, %zmm1, %xmm2
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $3, %zmm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v16f32(float %a0, <16 x float> %a1)
  ret float %1
}
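
;
; vXf32 (zero)
;
; A 0.0 accumulator still costs a real xorps + addss: x + 0.0 cannot be
; folded away to x under the default FP environment, since for x = -0.0 the
; sum is +0.0, not x.
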
define float @test_v2f32_zero(<2 x float> %a0) {
; SSE2-LABEL: test_v2f32_zero:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: addss %xmm0, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE41-LABEL: test_v2f32_zero:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; AVX-LABEL: test_v2f32_zero:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-LABEL: test_v2f32_zero:
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
  %1 = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v2f32(float 0.0, <2 x float> %a0)
  ret float %1
}

define float @test_v4f32_zero(<4 x float> %a0) {
; SSE2-LABEL: test_v4f32_zero:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: addss %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
; SSE2-NEXT: addss %xmm1, %xmm2
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT: addss %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE41-LABEL: test_v4f32_zero:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm2
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT: addss %xmm2, %xmm1
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; AVX-LABEL: test_v4f32_zero:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-LABEL: test_v4f32_zero:
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
  %1 = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float 0.0, <4 x float> %a0)
  ret float %1
}

define float @test_v8f32_zero(<8 x float> %a0) {
; SSE2-LABEL: test_v8f32_zero:
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: addss %xmm0, %xmm2
; SSE2-NEXT: movaps %xmm0, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
; SSE2-NEXT: addss %xmm2, %xmm3
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE2-NEXT: addss %xmm3, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE41-LABEL: test_v8f32_zero:
; SSE41-NEXT: xorps %xmm2, %xmm2
; SSE41-NEXT: addss %xmm0, %xmm2
; SSE41-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm3
; SSE41-NEXT: movaps %xmm0, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE41-NEXT: addss %xmm3, %xmm2
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: movaps %xmm1, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; AVX-LABEL: test_v8f32_zero:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX512-LABEL: test_v8f32_zero:
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v8f32(float 0.0, <8 x float> %a0)
  ret float %1
}

define float @test_v16f32_zero(<16 x float> %a0) {
; SSE2-LABEL: test_v16f32_zero:
; SSE2-NEXT: xorps %xmm4, %xmm4
; SSE2-NEXT: addss %xmm0, %xmm4
; SSE2-NEXT: movaps %xmm0, %xmm5
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm0[2,3]
; SSE2-NEXT: addss %xmm4, %xmm5
; SSE2-NEXT: movaps %xmm0, %xmm4
; SSE2-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
; SSE2-NEXT: addss %xmm5, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: addss %xmm4, %xmm0
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[2,3]
; SSE2-NEXT: addss %xmm4, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm4
; SSE2-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm1[1]
; SSE2-NEXT: addss %xmm4, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE41-LABEL: test_v16f32_zero:
; SSE41-NEXT: xorps %xmm4, %xmm4
; SSE41-NEXT: addss %xmm0, %xmm4
; SSE41-NEXT: movshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm4, %xmm5
; SSE41-NEXT: movaps %xmm0, %xmm4
; SSE41-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
; SSE41-NEXT: addss %xmm5, %xmm4
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE41-NEXT: addss %xmm4, %xmm0
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm4, %xmm0
; SSE41-NEXT: movaps %xmm1, %xmm4
; SSE41-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm1[1]
; SSE41-NEXT: addss %xmm4, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm2, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm3, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE41-NEXT: addss %xmm3, %xmm0
; AVX-LABEL: test_v16f32_zero:
; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm2
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm2
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm0
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX512-LABEL: test_v16f32_zero:
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm2
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v16f32(float 0.0, <16 x float> %a0)
  ret float %1
}
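
;
; vXf32 (undef)
;
; With an undef accumulator the first partial sum is simplified at compile
; time, so each chain starts with an addss from a constant-pool operand
; ({{.*}}(%rip)) rather than an add of the first lane.
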
define float @test_v2f32_undef(<2 x float> %a0) {
; SSE2-LABEL: test_v2f32_undef:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE2-NEXT: addss {{.*}}(%rip), %xmm0
; SSE41-LABEL: test_v2f32_undef:
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE41-NEXT: addss {{.*}}(%rip), %xmm0
; AVX-LABEL: test_v2f32_undef:
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; AVX512-LABEL: test_v2f32_undef:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
  %1 = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v2f32(float undef, <2 x float> %a0)
  ret float %1
}

define float @test_v4f32_undef(<4 x float> %a0) {
; SSE2-LABEL: test_v4f32_undef:
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; SSE2-NEXT: addss {{.*}}(%rip), %xmm1
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE2-NEXT: addss %xmm1, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE41-LABEL: test_v4f32_undef:
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: addss {{.*}}(%rip), %xmm1
; SSE41-NEXT: movaps %xmm0, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE41-NEXT: addss %xmm1, %xmm2
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; AVX-LABEL: test_v4f32_undef:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-LABEL: test_v4f32_undef:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
  %1 = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float undef, <4 x float> %a0)
  ret float %1
}

define float @test_v8f32_undef(<8 x float> %a0) {
; SSE2-LABEL: test_v8f32_undef:
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
; SSE2-NEXT: addss {{.*}}(%rip), %xmm2
; SSE2-NEXT: movaps %xmm0, %xmm3
; SSE2-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
; SSE2-NEXT: addss %xmm2, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE41-LABEL: test_v8f32_undef:
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: addss {{.*}}(%rip), %xmm2
; SSE41-NEXT: movaps %xmm0, %xmm3
; SSE41-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
; SSE41-NEXT: addss %xmm2, %xmm3
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: movaps %xmm1, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; AVX-LABEL: test_v8f32_undef:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX512-LABEL: test_v8f32_undef:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v8f32(float undef, <8 x float> %a0)
  ret float %1
}

define float @test_v16f32_undef(<16 x float> %a0) {
; SSE2-LABEL: test_v16f32_undef:
; SSE2-NEXT: movaps %xmm0, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm0[2,3]
; SSE2-NEXT: addss {{.*}}(%rip), %xmm4
; SSE2-NEXT: movaps %xmm0, %xmm5
; SSE2-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE2-NEXT: addss %xmm4, %xmm5
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: addss %xmm5, %xmm0
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[2,3]
; SSE2-NEXT: addss %xmm4, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm4
; SSE2-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm1[1]
; SSE2-NEXT: addss %xmm4, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE41-LABEL: test_v16f32_undef:
; SSE41-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE41-NEXT: addss {{.*}}(%rip), %xmm4
; SSE41-NEXT: movaps %xmm0, %xmm5
; SSE41-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE41-NEXT: addss %xmm4, %xmm5
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE41-NEXT: addss %xmm5, %xmm0
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm4, %xmm0
; SSE41-NEXT: movaps %xmm1, %xmm4
; SSE41-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm1[1]
; SSE41-NEXT: addss %xmm4, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm2, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm3, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE41-NEXT: addss %xmm3, %xmm0
; AVX-LABEL: test_v16f32_undef:
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss {{.*}}(%rip), %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm2
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm0
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX512-LABEL: test_v16f32_undef:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm2
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v16f32(float undef, <16 x float> %a0)
  ret float %1
}
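
;
; vXf64 (accum)
;
; The f64 reductions follow the same ordered pattern, two lanes per 128-bit
; chunk, using addsd with unpckhpd/vpermilpd to bring the high lane down.
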
define double @test_v2f64(double %a0, <2 x double> %a1) {
; SSE-LABEL: test_v2f64:
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; AVX-LABEL: test_v2f64:
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-LABEL: test_v2f64:
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
  %1 = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v2f64(double %a0, <2 x double> %a1)
  ret double %1
}

define double @test_v4f64(double %a0, <4 x double> %a1) {
; SSE-LABEL: test_v4f64:
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; AVX-LABEL: test_v4f64:
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX512-LABEL: test_v4f64:
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v4f64(double %a0, <4 x double> %a1)
  ret double %1
}

define double @test_v8f64(double %a0, <8 x double> %a1) {
; SSE-LABEL: test_v8f64:
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT: addsd %xmm4, %xmm0
; AVX-LABEL: test_v8f64:
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX512-LABEL: test_v8f64:
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $2, %zmm1, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $3, %zmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v8f64(double %a0, <8 x double> %a1)
  ret double %1
}

define double @test_v16f64(double %a0, <16 x double> %a1) {
; SSE-LABEL: test_v16f64:
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: addsd %xmm5, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1,1]
; SSE-NEXT: addsd %xmm5, %xmm0
; SSE-NEXT: addsd %xmm6, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT: addsd %xmm6, %xmm0
; SSE-NEXT: addsd %xmm7, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1,1]
; SSE-NEXT: addsd %xmm7, %xmm0
; SSE-NEXT: addsd %xmm8, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1,1]
; SSE-NEXT: addsd %xmm8, %xmm0
; AVX-LABEL: test_v16f64:
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm5, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm4, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm4[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm4, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX512-LABEL: test_v16f64:
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX512-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $2, %zmm1, %xmm3
; AVX512-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $3, %zmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $2, %zmm2, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $3, %zmm2, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v16f64(double %a0, <16 x double> %a1)
  ret double %1
}
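
;
; vXf64 (zero)
;
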
define double @test_v2f64_zero(<2 x double> %a0) {
; SSE-LABEL: test_v2f64_zero:
; SSE-NEXT: xorpd %xmm1, %xmm1
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; AVX-LABEL: test_v2f64_zero:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512-LABEL: test_v2f64_zero:
; AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm0
  %1 = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v2f64(double 0.0, <2 x double> %a0)
  ret double %1
}

define double @test_v4f64_zero(<4 x double> %a0) {
; SSE-LABEL: test_v4f64_zero:
; SSE-NEXT: xorpd %xmm2, %xmm2
; SSE-NEXT: addsd %xmm0, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; AVX-LABEL: test_v4f64_zero:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX512-LABEL: test_v4f64_zero:
; AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v4f64(double 0.0, <4 x double> %a0)
  ret double %1
}

define double @test_v8f64_zero(<8 x double> %a0) {
; SSE-LABEL: test_v8f64_zero:
; SSE-NEXT: xorpd %xmm4, %xmm4
; SSE-NEXT: addsd %xmm0, %xmm4
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: addsd %xmm3, %xmm0
; AVX-LABEL: test_v8f64_zero:
; AVX-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddsd %xmm0, %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm2, %xmm0
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX512-LABEL: test_v8f64_zero:
; AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v8f64(double 0.0, <8 x double> %a0)
  ret double %1
}

define double @test_v16f64_zero(<16 x double> %a0) {
; SSE-LABEL: test_v16f64_zero:
; SSE-NEXT: xorpd %xmm8, %xmm8
; SSE-NEXT: addsd %xmm0, %xmm8
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd %xmm8, %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: addsd %xmm5, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1,1]
; SSE-NEXT: addsd %xmm5, %xmm0
; SSE-NEXT: addsd %xmm6, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT: addsd %xmm6, %xmm0
; SSE-NEXT: addsd %xmm7, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1,1]
; SSE-NEXT: addsd %xmm7, %xmm0
; AVX-LABEL: test_v16f64_zero:
; AVX-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; AVX-NEXT: vaddsd %xmm4, %xmm0, %xmm4
; AVX-NEXT: vpermilpd {{.*#+}} xmm5 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm5, %xmm4, %xmm4
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddsd %xmm0, %xmm4, %xmm4
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm4, %xmm0
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm4, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX512-LABEL: test_v16f64_zero:
; AVX512-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm2
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm3
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vaddsd %xmm0, %xmm2, %xmm2
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $2, %zmm1, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $3, %zmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v16f64(double 0.0, <16 x double> %a0)
  ret double %1
}
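
;
; vXf64 (undef)
;
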
define double @test_v2f64_undef(<2 x double> %a0) {
; SSE-LABEL: test_v2f64_undef:
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd {{.*}}(%rip), %xmm0
; AVX-LABEL: test_v2f64_undef:
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-LABEL: test_v2f64_undef:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm0
  %1 = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v2f64(double undef, <2 x double> %a0)
  ret double %1
}

define double @test_v4f64_undef(<4 x double> %a0) {
; SSE-LABEL: test_v4f64_undef:
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd {{.*}}(%rip), %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; AVX-LABEL: test_v4f64_undef:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX512-LABEL: test_v4f64_undef:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v4f64(double undef, <4 x double> %a0)
  ret double %1
}

define double @test_v8f64_undef(<8 x double> %a0) {
; SSE-LABEL: test_v8f64_undef:
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd {{.*}}(%rip), %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: addsd %xmm3, %xmm0
; AVX-LABEL: test_v8f64_undef:
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm2, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddsd %xmm0, %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm2, %xmm0
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX512-LABEL: test_v8f64_undef:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v8f64(double undef, <8 x double> %a0)
  ret double %1
}

define double @test_v16f64_undef(<16 x double> %a0) {
; SSE-LABEL: test_v16f64_undef:
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd {{.*}}(%rip), %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: addsd %xmm5, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1,1]
; SSE-NEXT: addsd %xmm5, %xmm0
; SSE-NEXT: addsd %xmm6, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT: addsd %xmm6, %xmm0
; SSE-NEXT: addsd %xmm7, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1,1]
; SSE-NEXT: addsd %xmm7, %xmm0
; AVX-LABEL: test_v16f64_undef:
; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm4, %xmm4
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddsd %xmm0, %xmm4, %xmm4
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm4, %xmm0
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm4, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX512-LABEL: test_v16f64_undef:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddsd {{.*}}(%rip), %xmm2, %xmm2
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm3
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vaddsd %xmm0, %xmm2, %xmm2
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $2, %zmm1, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $3, %zmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
  %1 = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v16f64(double undef, <16 x double> %a0)
  ret double %1
}

declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v2f32(float, <2 x float>)
declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float, <4 x float>)
declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v8f32(float, <8 x float>)
declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v16f32(float, <16 x float>)

declare double @llvm.experimental.vector.reduce.v2.fadd.f64.v2f64(double, <2 x double>)
declare double @llvm.experimental.vector.reduce.v2.fadd.f64.v4f64(double, <4 x double>)
declare double @llvm.experimental.vector.reduce.v2.fadd.f64.v8f64(double, <8 x double>)
declare double @llvm.experimental.vector.reduce.v2.fadd.f64.v16f64(double, <16 x double>)