; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=ALL,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512FP16

;
; vXf32
;

define float @test_v1f32(<1 x float> %a0) {
; ALL-LABEL: test_v1f32:
; ALL:       # %bb.0:
; ALL-NEXT:    retq
  %1 = call nnan float @llvm.vector.reduce.fmin.v1f32(<1 x float> %a0)
  ret float %1
}

define float @test_v2f32(<2 x float> %a0) {
; SSE2-LABEL: test_v2f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE2-NEXT:    minss %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v2f32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT:    minss %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v2f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vminss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v2f32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vminss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = call nnan float @llvm.vector.reduce.fmin.v2f32(<2 x float> %a0)
  ret float %1
}

define float @test_v3f32(<3 x float> %a0) {
; SSE2-LABEL: test_v3f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps %xmm0, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1]
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    minss %xmm2, %xmm1
; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT:    minss %xmm0, %xmm1
; SSE2-NEXT:    movaps %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v3f32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT:    movaps %xmm0, %xmm1
; SSE41-NEXT:    minss %xmm2, %xmm1
; SSE41-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE41-NEXT:    minss %xmm0, %xmm1
; SSE41-NEXT:    movaps %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v3f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vminss %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT:    vminss %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v3f32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vminss %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT:    vminss %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    retq
  %1 = call nnan float @llvm.vector.reduce.fmin.v3f32(<3 x float> %a0)
  ret float %1
}

define float @test_v4f32(<4 x float> %a0) {
; SSE2-LABEL: test_v4f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT:    minps %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE2-NEXT:    minss %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v4f32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movaps %xmm0, %xmm1
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT:    minps %xmm1, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT:    minss %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v4f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vminps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vminss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v4f32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vminps %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vminss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = call nnan float @llvm.vector.reduce.fmin.v4f32(<4 x float> %a0)
  ret float %1
}

define float @test_v8f32(<8 x float> %a0) {
; SSE2-LABEL: test_v8f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    minps %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT:    minps %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE2-NEXT:    minss %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v8f32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    minps %xmm1, %xmm0
; SSE41-NEXT:    movaps %xmm0, %xmm1
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT:    minps %xmm1, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT:    minss %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v8f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT:    vminps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vminps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vminss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v8f32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vminps %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vminps %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vminss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call nnan float @llvm.vector.reduce.fmin.v8f32(<8 x float> %a0)
  ret float %1
}

define float @test_v16f32(<16 x float> %a0) {
; SSE2-LABEL: test_v16f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    minps %xmm3, %xmm1
; SSE2-NEXT:    minps %xmm2, %xmm0
; SSE2-NEXT:    minps %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT:    minps %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE2-NEXT:    minss %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v16f32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    minps %xmm3, %xmm1
; SSE41-NEXT:    minps %xmm2, %xmm0
; SSE41-NEXT:    minps %xmm1, %xmm0
; SSE41-NEXT:    movaps %xmm0, %xmm1
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT:    minps %xmm1, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT:    minss %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v16f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vminps %ymm1, %ymm0, %ymm0
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT:    vminps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vminps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vminss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v16f32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vminps %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vminps %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vminps %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vminss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call nnan float @llvm.vector.reduce.fmin.v16f32(<16 x float> %a0)
  ret float %1
}

;
; vXf64
;

define double @test_v2f64(<2 x double> %a0) {
; SSE-LABEL: test_v2f64:
; SSE:       # %bb.0:
; SSE-NEXT:    movapd %xmm0, %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT:    minsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v2f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vminsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v2f64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vminsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = call nnan double @llvm.vector.reduce.fmin.v2f64(<2 x double> %a0)
  ret double %1
}

define double @test_v4f64(<4 x double> %a0) {
; SSE-LABEL: test_v4f64:
; SSE:       # %bb.0:
; SSE-NEXT:    minpd %xmm1, %xmm0
; SSE-NEXT:    movapd %xmm0, %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT:    minsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v4f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT:    vminpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vminsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v4f64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vminpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vminsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call nnan double @llvm.vector.reduce.fmin.v4f64(<4 x double> %a0)
  ret double %1
}

define double @test_v8f64(<8 x double> %a0) {
; SSE-LABEL: test_v8f64:
; SSE:       # %bb.0:
; SSE-NEXT:    minpd %xmm3, %xmm1
; SSE-NEXT:    minpd %xmm2, %xmm0
; SSE-NEXT:    minpd %xmm1, %xmm0
; SSE-NEXT:    movapd %xmm0, %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT:    minsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v8f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vminpd %ymm1, %ymm0, %ymm0
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT:    vminpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vminsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v8f64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vminpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vminpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vminsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call nnan double @llvm.vector.reduce.fmin.v8f64(<8 x double> %a0)
  ret double %1
}

define double @test_v16f64(<16 x double> %a0) {
; SSE-LABEL: test_v16f64:
; SSE:       # %bb.0:
; SSE-NEXT:    minpd %xmm6, %xmm2
; SSE-NEXT:    minpd %xmm4, %xmm0
; SSE-NEXT:    minpd %xmm2, %xmm0
; SSE-NEXT:    minpd %xmm7, %xmm3
; SSE-NEXT:    minpd %xmm5, %xmm1
; SSE-NEXT:    minpd %xmm3, %xmm1
; SSE-NEXT:    minpd %xmm1, %xmm0
; SSE-NEXT:    movapd %xmm0, %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT:    minsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v16f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vminpd %ymm3, %ymm1, %ymm1
; AVX-NEXT:    vminpd %ymm2, %ymm0, %ymm0
; AVX-NEXT:    vminpd %ymm1, %ymm0, %ymm0
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT:    vminpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vminsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v16f64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vminpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vminpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vminpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vminsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call nnan double @llvm.vector.reduce.fmin.v16f64(<16 x double> %a0)
  ret double %1
}

define half @test_v2f16(<2 x half> %a0) nounwind {
; SSE-LABEL: test_v2f16:
; SSE:       # %bb.0:
; SSE-NEXT:    pushq %rbp
; SSE-NEXT:    pushq %rbx
; SSE-NEXT:    subq $40, %rsp
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    psrld $16, %xmm0
; SSE-NEXT:    pextrw $0, %xmm0, %ebx
; SSE-NEXT:    pextrw $0, %xmm1, %ebp
; SSE-NEXT:    callq __extendhfsf2@PLT
; SSE-NEXT:    movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    callq __extendhfsf2@PLT
; SSE-NEXT:    ucomiss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE-NEXT:    cmovbl %ebp, %ebx
; SSE-NEXT:    pinsrw $0, %ebx, %xmm0
; SSE-NEXT:    addq $40, %rsp
; SSE-NEXT:    popq %rbx
; SSE-NEXT:    popq %rbp
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v2f16:
; AVX:       # %bb.0:
; AVX-NEXT:    pushq %rbp
; AVX-NEXT:    pushq %rbx
; AVX-NEXT:    subq $40, %rsp
; AVX-NEXT:    vmovdqa %xmm0, %xmm1
; AVX-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vpsrld $16, %xmm0, %xmm0
; AVX-NEXT:    vpextrw $0, %xmm0, %ebx
; AVX-NEXT:    vpextrw $0, %xmm1, %ebp
; AVX-NEXT:    callq __extendhfsf2@PLT
; AVX-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    callq __extendhfsf2@PLT
; AVX-NEXT:    vucomiss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; AVX-NEXT:    cmovbl %ebp, %ebx
; AVX-NEXT:    vpinsrw $0, %ebx, %xmm0, %xmm0
; AVX-NEXT:    addq $40, %rsp
; AVX-NEXT:    popq %rbx
; AVX-NEXT:    popq %rbp
; AVX-NEXT:    retq
;
; AVX512F-LABEL: test_v2f16:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX512F-NEXT:    vpextrw $0, %xmm0, %eax
; AVX512F-NEXT:    movzwl %ax, %eax
; AVX512F-NEXT:    vmovd %eax, %xmm2
; AVX512F-NEXT:    vcvtph2ps %xmm2, %xmm2
; AVX512F-NEXT:    vpextrw $0, %xmm1, %eax
; AVX512F-NEXT:    movzwl %ax, %eax
; AVX512F-NEXT:    vmovd %eax, %xmm3
; AVX512F-NEXT:    vcvtph2ps %xmm3, %xmm3
; AVX512F-NEXT:    xorl %eax, %eax
; AVX512F-NEXT:    vucomiss %xmm3, %xmm2
; AVX512F-NEXT:    movl $255, %ecx
; AVX512F-NEXT:    cmovael %eax, %ecx
; AVX512F-NEXT:    kmovd %ecx, %k1
; AVX512F-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
; AVX512F-NEXT:    vmovdqa %xmm1, %xmm0
; AVX512F-NEXT:    vzeroupper
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: test_v2f16:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX512VL-NEXT:    vpextrw $0, %xmm0, %eax
; AVX512VL-NEXT:    movzwl %ax, %eax
; AVX512VL-NEXT:    vmovd %eax, %xmm2
; AVX512VL-NEXT:    vcvtph2ps %xmm2, %xmm2
; AVX512VL-NEXT:    vpextrw $0, %xmm1, %eax
; AVX512VL-NEXT:    movzwl %ax, %eax
; AVX512VL-NEXT:    vmovd %eax, %xmm3
; AVX512VL-NEXT:    vcvtph2ps %xmm3, %xmm3
; AVX512VL-NEXT:    xorl %eax, %eax
; AVX512VL-NEXT:    vucomiss %xmm3, %xmm2
; AVX512VL-NEXT:    movl $255, %ecx
; AVX512VL-NEXT:    cmovael %eax, %ecx
; AVX512VL-NEXT:    kmovd %ecx, %k1
; AVX512VL-NEXT:    vmovdqu16 %xmm0, %xmm1 {%k1}
; AVX512VL-NEXT:    vmovdqa %xmm1, %xmm0
; AVX512VL-NEXT:    retq
;
; AVX512FP16-LABEL: test_v2f16:
; AVX512FP16:       # %bb.0:
; AVX512FP16-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX512FP16-NEXT:    vcmpltph %xmm1, %xmm0, %k1
; AVX512FP16-NEXT:    vmovsh %xmm0, %xmm0, %xmm1 {%k1}
; AVX512FP16-NEXT:    vmovaps %xmm1, %xmm0
; AVX512FP16-NEXT:    retq
  %1 = call nnan half @llvm.vector.reduce.fmin.v2f16(<2 x half> %a0)
  ret half %1
}

declare float @llvm.vector.reduce.fmin.v1f32(<1 x float>)
declare float @llvm.vector.reduce.fmin.v2f32(<2 x float>)
declare float @llvm.vector.reduce.fmin.v3f32(<3 x float>)
declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>)
declare float @llvm.vector.reduce.fmin.v8f32(<8 x float>)
declare float @llvm.vector.reduce.fmin.v16f32(<16 x float>)

declare double @llvm.vector.reduce.fmin.v2f64(<2 x double>)
declare double @llvm.vector.reduce.fmin.v4f64(<4 x double>)
declare double @llvm.vector.reduce.fmin.v8f64(<8 x double>)
declare double @llvm.vector.reduce.fmin.v16f64(<16 x double>)

declare half @llvm.vector.reduce.fmin.v2f16(<2 x half>)