; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE4
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512

declare float @fminf(float, float)
declare double @fmin(double, double)
declare x86_fp80 @fminl(x86_fp80, x86_fp80)
declare float @llvm.minnum.f32(float, float)
declare double @llvm.minnum.f64(double, double)
declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80)

declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>)
declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>)
declare <8 x float> @llvm.minnum.v8f32(<8 x float>, <8 x float>)
declare <16 x float> @llvm.minnum.v16f32(<16 x float>, <16 x float>)
declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>)
declare <4 x double> @llvm.minnum.v4f64(<4 x double>, <4 x double>)
declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>)

; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.

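; Note: the scalar SSE sequences below open-code the minnum NaN rule (if %x is
; NaN, return %y) with cmpunordss + and/andn/or rather than a blend; those
; manual blends account for the extra register copies. The vector tests later
; in the file use (v)blendvps/(v)blendvpd when available.
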
define float @test_fminf(float %x, float %y) {
; SSE-LABEL: test_fminf:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: minss %xmm0, %xmm1
; SSE-NEXT: andnps %xmm1, %xmm2
; SSE-NEXT: orps %xmm3, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fminf:
; AVX1: # %bb.0:
; AVX1-NEXT: vminss %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX512-LABEL: test_fminf:
; AVX512: # %bb.0:
; AVX512-NEXT: vminss %xmm0, %xmm1, %xmm2
; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovaps %xmm2, %xmm0
; AVX512-NEXT: retq
  %z = call float @fminf(float %x, float %y) readnone
  ret float %z
}

define float @test_fminf_minsize(float %x, float %y) minsize {
; CHECK-LABEL: test_fminf_minsize:
; CHECK: # %bb.0:
; CHECK-NEXT: jmp fminf@PLT # TAILCALL
  %z = call float @fminf(float %x, float %y) readnone
  ret float %z
}

; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.

define double @test_fmin(double %x, double %y) {
; SSE-LABEL: test_fmin:
; SSE: # %bb.0:
; SSE-NEXT: movapd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: minsd %xmm0, %xmm1
; SSE-NEXT: andnpd %xmm1, %xmm2
; SSE-NEXT: orpd %xmm3, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fmin:
; AVX1: # %bb.0:
; AVX1-NEXT: vminsd %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX512-LABEL: test_fmin:
; AVX512: # %bb.0:
; AVX512-NEXT: vminsd %xmm0, %xmm1, %xmm2
; AVX512-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovapd %xmm2, %xmm0
; AVX512-NEXT: retq
  %z = call double @fmin(double %x, double %y) readnone
  ret double %z
}

define x86_fp80 @test_fminl(x86_fp80 %x, x86_fp80 %y) {
; CHECK-LABEL: test_fminl:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fstpt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fstpt (%rsp)
; CHECK-NEXT: callq fminl@PLT
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
  %z = call x86_fp80 @fminl(x86_fp80 %x, x86_fp80 %y) readnone
  ret x86_fp80 %z
}

define float @test_intrinsic_fminf(float %x, float %y) {
; SSE-LABEL: test_intrinsic_fminf:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: minss %xmm0, %xmm1
; SSE-NEXT: andnps %xmm1, %xmm2
; SSE-NEXT: orps %xmm3, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_intrinsic_fminf:
; AVX1: # %bb.0:
; AVX1-NEXT: vminss %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX512-LABEL: test_intrinsic_fminf:
; AVX512: # %bb.0:
; AVX512-NEXT: vminss %xmm0, %xmm1, %xmm2
; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovaps %xmm2, %xmm0
; AVX512-NEXT: retq
  %z = call float @llvm.minnum.f32(float %x, float %y) readnone
  ret float %z
}

define double @test_intrinsic_fmin(double %x, double %y) {
; SSE-LABEL: test_intrinsic_fmin:
; SSE: # %bb.0:
; SSE-NEXT: movapd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: minsd %xmm0, %xmm1
; SSE-NEXT: andnpd %xmm1, %xmm2
; SSE-NEXT: orpd %xmm3, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_intrinsic_fmin:
; AVX1: # %bb.0:
; AVX1-NEXT: vminsd %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX512-LABEL: test_intrinsic_fmin:
; AVX512: # %bb.0:
; AVX512-NEXT: vminsd %xmm0, %xmm1, %xmm2
; AVX512-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovapd %xmm2, %xmm0
; AVX512-NEXT: retq
  %z = call double @llvm.minnum.f64(double %x, double %y) readnone
  ret double %z
}

define x86_fp80 @test_intrinsic_fminl(x86_fp80 %x, x86_fp80 %y) {
; CHECK-LABEL: test_intrinsic_fminl:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fstpt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fstpt (%rsp)
; CHECK-NEXT: callq fminl@PLT
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
  %z = call x86_fp80 @llvm.minnum.f80(x86_fp80 %x, x86_fp80 %y) readnone
  ret x86_fp80 %z
}

define <2 x float> @test_intrinsic_fmin_v2f32(<2 x float> %x, <2 x float> %y) {
; SSE2-LABEL: test_intrinsic_fmin_v2f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: minps %xmm0, %xmm2
; SSE2-NEXT: cmpunordps %xmm0, %xmm0
; SSE2-NEXT: andps %xmm0, %xmm1
; SSE2-NEXT: andnps %xmm2, %xmm0
; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_intrinsic_fmin_v2f32:
; SSE4: # %bb.0:
; SSE4-NEXT: movaps %xmm1, %xmm2
; SSE4-NEXT: minps %xmm0, %xmm2
; SSE4-NEXT: cmpunordps %xmm0, %xmm0
; SSE4-NEXT: blendvps %xmm0, %xmm1, %xmm2
; SSE4-NEXT: movaps %xmm2, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test_intrinsic_fmin_v2f32:
; AVX: # %bb.0:
; AVX-NEXT: vminps %xmm0, %xmm1, %xmm2
; AVX-NEXT: vcmpunordps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT: retq
  %z = call <2 x float> @llvm.minnum.v2f32(<2 x float> %x, <2 x float> %y) readnone
  ret <2 x float> %z
}

define <4 x float> @test_intrinsic_fmin_v4f32(<4 x float> %x, <4 x float> %y) {
; SSE2-LABEL: test_intrinsic_fmin_v4f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: minps %xmm0, %xmm2
; SSE2-NEXT: cmpunordps %xmm0, %xmm0
; SSE2-NEXT: andps %xmm0, %xmm1
; SSE2-NEXT: andnps %xmm2, %xmm0
; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_intrinsic_fmin_v4f32:
; SSE4: # %bb.0:
; SSE4-NEXT: movaps %xmm1, %xmm2
; SSE4-NEXT: minps %xmm0, %xmm2
; SSE4-NEXT: cmpunordps %xmm0, %xmm0
; SSE4-NEXT: blendvps %xmm0, %xmm1, %xmm2
; SSE4-NEXT: movaps %xmm2, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test_intrinsic_fmin_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vminps %xmm0, %xmm1, %xmm2
; AVX-NEXT: vcmpunordps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT: retq
  %z = call <4 x float> @llvm.minnum.v4f32(<4 x float> %x, <4 x float> %y) readnone
  ret <4 x float> %z
}

define <8 x float> @test_intrinsic_fmin_v8f32(<8 x float> %x, <8 x float> %y) {
; SSE2-LABEL: test_intrinsic_fmin_v8f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm2, %xmm4
; SSE2-NEXT: minps %xmm0, %xmm4
; SSE2-NEXT: cmpunordps %xmm0, %xmm0
; SSE2-NEXT: andps %xmm0, %xmm2
; SSE2-NEXT: andnps %xmm4, %xmm0
; SSE2-NEXT: orps %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm3, %xmm2
; SSE2-NEXT: minps %xmm1, %xmm2
; SSE2-NEXT: cmpunordps %xmm1, %xmm1
; SSE2-NEXT: andps %xmm1, %xmm3
; SSE2-NEXT: andnps %xmm2, %xmm1
; SSE2-NEXT: orps %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_intrinsic_fmin_v8f32:
; SSE4: # %bb.0:
; SSE4-NEXT: movaps %xmm1, %xmm5
; SSE4-NEXT: movaps %xmm2, %xmm4
; SSE4-NEXT: minps %xmm0, %xmm4
; SSE4-NEXT: cmpunordps %xmm0, %xmm0
; SSE4-NEXT: blendvps %xmm0, %xmm2, %xmm4
; SSE4-NEXT: movaps %xmm3, %xmm1
; SSE4-NEXT: minps %xmm5, %xmm1
; SSE4-NEXT: cmpunordps %xmm5, %xmm5
; SSE4-NEXT: movaps %xmm5, %xmm0
; SSE4-NEXT: blendvps %xmm0, %xmm3, %xmm1
; SSE4-NEXT: movaps %xmm4, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test_intrinsic_fmin_v8f32:
; AVX: # %bb.0:
; AVX-NEXT: vminps %ymm0, %ymm1, %ymm2
; AVX-NEXT: vcmpunordps %ymm0, %ymm0, %ymm0
; AVX-NEXT: vblendvps %ymm0, %ymm1, %ymm2, %ymm0
; AVX-NEXT: retq
  %z = call <8 x float> @llvm.minnum.v8f32(<8 x float> %x, <8 x float> %y) readnone
  ret <8 x float> %z
}

define <16 x float> @test_intrinsic_fmin_v16f32(<16 x float> %x, <16 x float> %y) {
; SSE2-LABEL: test_intrinsic_fmin_v16f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm4, %xmm8
; SSE2-NEXT: minps %xmm0, %xmm8
; SSE2-NEXT: cmpunordps %xmm0, %xmm0
; SSE2-NEXT: andps %xmm0, %xmm4
; SSE2-NEXT: andnps %xmm8, %xmm0
; SSE2-NEXT: orps %xmm4, %xmm0
; SSE2-NEXT: movaps %xmm5, %xmm4
; SSE2-NEXT: minps %xmm1, %xmm4
; SSE2-NEXT: cmpunordps %xmm1, %xmm1
; SSE2-NEXT: andps %xmm1, %xmm5
; SSE2-NEXT: andnps %xmm4, %xmm1
; SSE2-NEXT: orps %xmm5, %xmm1
; SSE2-NEXT: movaps %xmm6, %xmm4
; SSE2-NEXT: minps %xmm2, %xmm4
; SSE2-NEXT: cmpunordps %xmm2, %xmm2
; SSE2-NEXT: andps %xmm2, %xmm6
; SSE2-NEXT: andnps %xmm4, %xmm2
; SSE2-NEXT: orps %xmm6, %xmm2
; SSE2-NEXT: movaps %xmm7, %xmm4
; SSE2-NEXT: minps %xmm3, %xmm4
; SSE2-NEXT: cmpunordps %xmm3, %xmm3
; SSE2-NEXT: andps %xmm3, %xmm7
; SSE2-NEXT: andnps %xmm4, %xmm3
; SSE2-NEXT: orps %xmm7, %xmm3
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_intrinsic_fmin_v16f32:
; SSE4: # %bb.0:
; SSE4-NEXT: movaps %xmm3, %xmm11
; SSE4-NEXT: movaps %xmm2, %xmm10
; SSE4-NEXT: movaps %xmm1, %xmm9
; SSE4-NEXT: movaps %xmm4, %xmm8
; SSE4-NEXT: minps %xmm0, %xmm8
; SSE4-NEXT: cmpunordps %xmm0, %xmm0
; SSE4-NEXT: blendvps %xmm0, %xmm4, %xmm8
; SSE4-NEXT: movaps %xmm5, %xmm1
; SSE4-NEXT: minps %xmm9, %xmm1
; SSE4-NEXT: cmpunordps %xmm9, %xmm9
; SSE4-NEXT: movaps %xmm9, %xmm0
; SSE4-NEXT: blendvps %xmm0, %xmm5, %xmm1
; SSE4-NEXT: movaps %xmm6, %xmm2
; SSE4-NEXT: minps %xmm10, %xmm2
; SSE4-NEXT: cmpunordps %xmm10, %xmm10
; SSE4-NEXT: movaps %xmm10, %xmm0
; SSE4-NEXT: blendvps %xmm0, %xmm6, %xmm2
; SSE4-NEXT: movaps %xmm7, %xmm3
; SSE4-NEXT: minps %xmm11, %xmm3
; SSE4-NEXT: cmpunordps %xmm11, %xmm11
; SSE4-NEXT: movaps %xmm11, %xmm0
; SSE4-NEXT: blendvps %xmm0, %xmm7, %xmm3
; SSE4-NEXT: movaps %xmm8, %xmm0
; SSE4-NEXT: retq
;
; AVX1-LABEL: test_intrinsic_fmin_v16f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vminps %ymm0, %ymm2, %ymm4
; AVX1-NEXT: vcmpunordps %ymm0, %ymm0, %ymm0
; AVX1-NEXT: vblendvps %ymm0, %ymm2, %ymm4, %ymm0
; AVX1-NEXT: vminps %ymm1, %ymm3, %ymm2
; AVX1-NEXT: vcmpunordps %ymm1, %ymm1, %ymm1
; AVX1-NEXT: vblendvps %ymm1, %ymm3, %ymm2, %ymm1
; AVX1-NEXT: retq
;
; AVX512-LABEL: test_intrinsic_fmin_v16f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vminps %zmm0, %zmm1, %zmm2
; AVX512-NEXT: vcmpunordps %zmm0, %zmm0, %k1
; AVX512-NEXT: vmovaps %zmm1, %zmm2 {%k1}
; AVX512-NEXT: vmovaps %zmm2, %zmm0
; AVX512-NEXT: retq
  %z = call <16 x float> @llvm.minnum.v16f32(<16 x float> %x, <16 x float> %y) readnone
  ret <16 x float> %z
}

define <2 x double> @test_intrinsic_fmin_v2f64(<2 x double> %x, <2 x double> %y) {
; SSE2-LABEL: test_intrinsic_fmin_v2f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movapd %xmm1, %xmm2
; SSE2-NEXT: minpd %xmm0, %xmm2
; SSE2-NEXT: cmpunordpd %xmm0, %xmm0
; SSE2-NEXT: andpd %xmm0, %xmm1
; SSE2-NEXT: andnpd %xmm2, %xmm0
; SSE2-NEXT: orpd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_intrinsic_fmin_v2f64:
; SSE4: # %bb.0:
; SSE4-NEXT: movapd %xmm1, %xmm2
; SSE4-NEXT: minpd %xmm0, %xmm2
; SSE4-NEXT: cmpunordpd %xmm0, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test_intrinsic_fmin_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm2
; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT: retq
  %z = call <2 x double> @llvm.minnum.v2f64(<2 x double> %x, <2 x double> %y) readnone
  ret <2 x double> %z
}

define <4 x double> @test_intrinsic_fmin_v4f64(<4 x double> %x, <4 x double> %y) {
; SSE2-LABEL: test_intrinsic_fmin_v4f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movapd %xmm2, %xmm4
; SSE2-NEXT: minpd %xmm0, %xmm4
; SSE2-NEXT: cmpunordpd %xmm0, %xmm0
; SSE2-NEXT: andpd %xmm0, %xmm2
; SSE2-NEXT: andnpd %xmm4, %xmm0
; SSE2-NEXT: orpd %xmm2, %xmm0
; SSE2-NEXT: movapd %xmm3, %xmm2
; SSE2-NEXT: minpd %xmm1, %xmm2
; SSE2-NEXT: cmpunordpd %xmm1, %xmm1
; SSE2-NEXT: andpd %xmm1, %xmm3
; SSE2-NEXT: andnpd %xmm2, %xmm1
; SSE2-NEXT: orpd %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_intrinsic_fmin_v4f64:
; SSE4: # %bb.0:
; SSE4-NEXT: movapd %xmm1, %xmm5
; SSE4-NEXT: movapd %xmm2, %xmm4
; SSE4-NEXT: minpd %xmm0, %xmm4
; SSE4-NEXT: cmpunordpd %xmm0, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm4
; SSE4-NEXT: movapd %xmm3, %xmm1
; SSE4-NEXT: minpd %xmm5, %xmm1
; SSE4-NEXT: cmpunordpd %xmm5, %xmm5
; SSE4-NEXT: movapd %xmm5, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm1
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: retq
;
; AVX-LABEL: test_intrinsic_fmin_v4f64:
; AVX: # %bb.0:
; AVX-NEXT: vminpd %ymm0, %ymm1, %ymm2
; AVX-NEXT: vcmpunordpd %ymm0, %ymm0, %ymm0
; AVX-NEXT: vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
; AVX-NEXT: retq
  %z = call <4 x double> @llvm.minnum.v4f64(<4 x double> %x, <4 x double> %y) readnone
  ret <4 x double> %z
}

define <8 x double> @test_intrinsic_fmin_v8f64(<8 x double> %x, <8 x double> %y) {
; SSE2-LABEL: test_intrinsic_fmin_v8f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movapd %xmm4, %xmm8
; SSE2-NEXT: minpd %xmm0, %xmm8
; SSE2-NEXT: cmpunordpd %xmm0, %xmm0
; SSE2-NEXT: andpd %xmm0, %xmm4
; SSE2-NEXT: andnpd %xmm8, %xmm0
; SSE2-NEXT: orpd %xmm4, %xmm0
; SSE2-NEXT: movapd %xmm5, %xmm4
; SSE2-NEXT: minpd %xmm1, %xmm4
; SSE2-NEXT: cmpunordpd %xmm1, %xmm1
; SSE2-NEXT: andpd %xmm1, %xmm5
; SSE2-NEXT: andnpd %xmm4, %xmm1
; SSE2-NEXT: orpd %xmm5, %xmm1
; SSE2-NEXT: movapd %xmm6, %xmm4
; SSE2-NEXT: minpd %xmm2, %xmm4
; SSE2-NEXT: cmpunordpd %xmm2, %xmm2
; SSE2-NEXT: andpd %xmm2, %xmm6
; SSE2-NEXT: andnpd %xmm4, %xmm2
; SSE2-NEXT: orpd %xmm6, %xmm2
; SSE2-NEXT: movapd %xmm7, %xmm4
; SSE2-NEXT: minpd %xmm3, %xmm4
; SSE2-NEXT: cmpunordpd %xmm3, %xmm3
; SSE2-NEXT: andpd %xmm3, %xmm7
; SSE2-NEXT: andnpd %xmm4, %xmm3
; SSE2-NEXT: orpd %xmm7, %xmm3
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_intrinsic_fmin_v8f64:
; SSE4: # %bb.0:
; SSE4-NEXT: movapd %xmm3, %xmm11
; SSE4-NEXT: movapd %xmm2, %xmm10
; SSE4-NEXT: movapd %xmm1, %xmm9
; SSE4-NEXT: movapd %xmm4, %xmm8
; SSE4-NEXT: minpd %xmm0, %xmm8
; SSE4-NEXT: cmpunordpd %xmm0, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm8
; SSE4-NEXT: movapd %xmm5, %xmm1
; SSE4-NEXT: minpd %xmm9, %xmm1
; SSE4-NEXT: cmpunordpd %xmm9, %xmm9
; SSE4-NEXT: movapd %xmm9, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm5, %xmm1
; SSE4-NEXT: movapd %xmm6, %xmm2
; SSE4-NEXT: minpd %xmm10, %xmm2
; SSE4-NEXT: cmpunordpd %xmm10, %xmm10
; SSE4-NEXT: movapd %xmm10, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm6, %xmm2
; SSE4-NEXT: movapd %xmm7, %xmm3
; SSE4-NEXT: minpd %xmm11, %xmm3
; SSE4-NEXT: cmpunordpd %xmm11, %xmm11
; SSE4-NEXT: movapd %xmm11, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm7, %xmm3
; SSE4-NEXT: movapd %xmm8, %xmm0
; SSE4-NEXT: retq
;
; AVX1-LABEL: test_intrinsic_fmin_v8f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vminpd %ymm0, %ymm2, %ymm4
; AVX1-NEXT: vcmpunordpd %ymm0, %ymm0, %ymm0
; AVX1-NEXT: vblendvpd %ymm0, %ymm2, %ymm4, %ymm0
; AVX1-NEXT: vminpd %ymm1, %ymm3, %ymm2
; AVX1-NEXT: vcmpunordpd %ymm1, %ymm1, %ymm1
; AVX1-NEXT: vblendvpd %ymm1, %ymm3, %ymm2, %ymm1
; AVX1-NEXT: retq
;
; AVX512-LABEL: test_intrinsic_fmin_v8f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vminpd %zmm0, %zmm1, %zmm2
; AVX512-NEXT: vcmpunordpd %zmm0, %zmm0, %k1
; AVX512-NEXT: vmovapd %zmm1, %zmm2 {%k1}
; AVX512-NEXT: vmovapd %zmm2, %zmm0
; AVX512-NEXT: retq
  %z = call <8 x double> @llvm.minnum.v8f64(<8 x double> %x, <8 x double> %y) readnone
  ret <8 x double> %z
}

; The IR-level FMF propagate to the node. With nnan, there's no need to blend.

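; (Without the nnan flag, these lowerings would need the compare-and-blend
; sequences shown in the tests above.)
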
define float @minnum_intrinsic_nnan_fmf_f32(float %a, float %b) {
; SSE-LABEL: minnum_intrinsic_nnan_fmf_f32:
; SSE: # %bb.0:
; SSE-NEXT: minss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: minnum_intrinsic_nnan_fmf_f32:
; AVX: # %bb.0:
; AVX-NEXT: vminss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %r = tail call nnan float @llvm.minnum.f32(float %a, float %b)
  ret float %r
}

; Make sure vectors work too.

define <2 x double> @minnum_intrinsic_nnan_fmf_v2f64(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: minnum_intrinsic_nnan_fmf_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: minpd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: minnum_intrinsic_nnan_fmf_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vminpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %r = tail call nnan <2 x double> @llvm.minnum.v2f64(<2 x double> %a, <2 x double> %b)
  ret <2 x double> %r
}

; Current (but legacy someday): a function-level attribute should also enable the fold.

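; (Attribute group #0, defined at the end of the file, is "no-nans-fp-math"="true".)
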
define double @minnum_intrinsic_nnan_attr_f64(double %a, double %b) #0 {
; SSE-LABEL: minnum_intrinsic_nnan_attr_f64:
; SSE: # %bb.0:
; SSE-NEXT: minsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: minnum_intrinsic_nnan_attr_f64:
; AVX: # %bb.0:
; AVX-NEXT: vminsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %r = tail call double @llvm.minnum.f64(double %a, double %b)
  ret double %r
}

; Make sure vectors work too.

define <4 x float> @minnum_intrinsic_nnan_attr_v4f32(<4 x float> %a, <4 x float> %b) #0 {
; SSE-LABEL: minnum_intrinsic_nnan_attr_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: minps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: minnum_intrinsic_nnan_attr_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vminps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %r = tail call <4 x float> @llvm.minnum.v4f32(<4 x float> %a, <4 x float> %b)
  ret <4 x float> %r
}

define float @test_minnum_const_op1(float %x) {
; SSE-LABEL: test_minnum_const_op1:
; SSE: # %bb.0:
; SSE-NEXT: minss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_minnum_const_op1:
; AVX: # %bb.0:
; AVX-NEXT: vminss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %r = call float @llvm.minnum.f32(float 1.0, float %x)
  ret float %r
}

define float @test_minnum_const_op2(float %x) {
; SSE-LABEL: test_minnum_const_op2:
; SSE: # %bb.0:
; SSE-NEXT: minss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_minnum_const_op2:
; AVX: # %bb.0:
; AVX-NEXT: vminss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %r = call float @llvm.minnum.f32(float %x, float 1.0)
  ret float %r
}

define float @test_minnum_const_nan(float %x) {
; CHECK-LABEL: test_minnum_const_nan:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %r = call float @llvm.minnum.f32(float %x, float 0x7fff000000000000)
  ret float %r
}

attributes #0 = { "no-nans-fp-math"="true" }