; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -verify-machineinstrs --show-mc-encoding -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16 | FileCheck %s --check-prefixes=CHECK

declare half @llvm.minnum.f16(half, half)
declare <2 x half> @llvm.minnum.v2f16(<2 x half>, <2 x half>)
declare <4 x half> @llvm.minnum.v4f16(<4 x half>, <4 x half>)
declare <8 x half> @llvm.minnum.v8f16(<8 x half>, <8 x half>)
declare <16 x half> @llvm.minnum.v16f16(<16 x half>, <16 x half>)
declare <32 x half> @llvm.minnum.v32f16(<32 x half>, <32 x half>)
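
; llvm.minnum returns the non-NaN operand when exactly one input is NaN, while
; VMINSH/VMINPH return the second source operand on unordered inputs. The
; checks below therefore expect the operands to be commuted into the min (so a
; NaN in %y already yields %x) plus a vcmpunord mask and a masked move that
; select %y whenever %x is NaN.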

define half @test_intrinsic_fminh(half %x, half %y) {
; CHECK-LABEL: test_intrinsic_fminh:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vminsh %xmm0, %xmm1, %xmm2 # encoding: [0x62,0xf5,0x76,0x08,0x5d,0xd0]
; CHECK-NEXT:    vcmpunordsh %xmm0, %xmm0, %k1 # encoding: [0x62,0xf3,0x7e,0x08,0xc2,0xc8,0x03]
; CHECK-NEXT:    vmovsh %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf5,0x7e,0x09,0x10,0xd1]
; CHECK-NEXT:    vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; CHECK-NEXT:    retq # encoding: [0xc3]
  %z = call half @llvm.minnum.f16(half %x, half %y) readnone
  ret half %z
}

define <2 x half> @test_intrinsic_fmin_v2f16(<2 x half> %x, <2 x half> %y) {
; CHECK-LABEL: test_intrinsic_fmin_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vminph %xmm0, %xmm1, %xmm2 # encoding: [0x62,0xf5,0x74,0x08,0x5d,0xd0]
; CHECK-NEXT:    vcmpunordph %xmm0, %xmm0, %k1 # encoding: [0x62,0xf3,0x7c,0x08,0xc2,0xc8,0x03]
; CHECK-NEXT:    vmovdqu16 %xmm1, %xmm2 {%k1} # encoding: [0x62,0xf1,0xff,0x09,0x6f,0xd1]
; CHECK-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT:    retq # encoding: [0xc3]
  %z = call <2 x half> @llvm.minnum.v2f16(<2 x half> %x, <2 x half> %y) readnone
  ret <2 x half> %z
}

define <4 x half> @test_intrinsic_fmin_v4f16(<4 x half> %x, <4 x half> %y) {
; CHECK-LABEL: test_intrinsic_fmin_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vminph %xmm0, %xmm1, %xmm2 # encoding: [0x62,0xf5,0x74,0x08,0x5d,0xd0]
; CHECK-NEXT:    vcmpunordph %xmm0, %xmm0, %k1 # encoding: [0x62,0xf3,0x7c,0x08,0xc2,0xc8,0x03]
; CHECK-NEXT:    vmovdqu16 %xmm1, %xmm2 {%k1} # encoding: [0x62,0xf1,0xff,0x09,0x6f,0xd1]
; CHECK-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT:    retq # encoding: [0xc3]
  %z = call <4 x half> @llvm.minnum.v4f16(<4 x half> %x, <4 x half> %y) readnone
  ret <4 x half> %z
}

define <8 x half> @test_intrinsic_fmin_v8f16(<8 x half> %x, <8 x half> %y) {
; CHECK-LABEL: test_intrinsic_fmin_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vminph %xmm0, %xmm1, %xmm2 # encoding: [0x62,0xf5,0x74,0x08,0x5d,0xd0]
; CHECK-NEXT:    vcmpunordph %xmm0, %xmm0, %k1 # encoding: [0x62,0xf3,0x7c,0x08,0xc2,0xc8,0x03]
; CHECK-NEXT:    vmovdqu16 %xmm1, %xmm2 {%k1} # encoding: [0x62,0xf1,0xff,0x09,0x6f,0xd1]
; CHECK-NEXT:    vmovdqa %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT:    retq # encoding: [0xc3]
  %z = call <8 x half> @llvm.minnum.v8f16(<8 x half> %x, <8 x half> %y) readnone
  ret <8 x half> %z
}

define <16 x half> @test_intrinsic_fmin_v16f16(<16 x half> %x, <16 x half> %y) {
; CHECK-LABEL: test_intrinsic_fmin_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vminph %ymm0, %ymm1, %ymm2 # encoding: [0x62,0xf5,0x74,0x28,0x5d,0xd0]
; CHECK-NEXT:    vcmpunordph %ymm0, %ymm0, %k1 # encoding: [0x62,0xf3,0x7c,0x28,0xc2,0xc8,0x03]
; CHECK-NEXT:    vmovdqu16 %ymm1, %ymm2 {%k1} # encoding: [0x62,0xf1,0xff,0x29,0x6f,0xd1]
; CHECK-NEXT:    vmovdqa %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT:    retq # encoding: [0xc3]
  %z = call <16 x half> @llvm.minnum.v16f16(<16 x half> %x, <16 x half> %y) readnone
  ret <16 x half> %z
}

define <32 x half> @test_intrinsic_fmin_v32f16(<32 x half> %x, <32 x half> %y) {
; CHECK-LABEL: test_intrinsic_fmin_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vminph %zmm0, %zmm1, %zmm2 # encoding: [0x62,0xf5,0x74,0x48,0x5d,0xd0]
; CHECK-NEXT:    vcmpunordph %zmm0, %zmm0, %k1 # encoding: [0x62,0xf3,0x7c,0x48,0xc2,0xc8,0x03]
; CHECK-NEXT:    vmovdqu16 %zmm1, %zmm2 {%k1} # encoding: [0x62,0xf1,0xff,0x49,0x6f,0xd1]
; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
; CHECK-NEXT:    retq # encoding: [0xc3]
  %z = call <32 x half> @llvm.minnum.v32f16(<32 x half> %x, <32 x half> %y) readnone
  ret <32 x half> %z
}
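
; With the nnan fast-math flag, or the "no-nans-fp-math"="true" function
; attribute (#0 below), no NaN fixup is needed and a single vminph/vminsh is
; expected.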

define <4 x half> @minnum_intrinsic_nnan_fmf_f416(<4 x half> %a, <4 x half> %b) {
; CHECK-LABEL: minnum_intrinsic_nnan_fmf_f416:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vminph %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7c,0x08,0x5d,0xc1]
; CHECK-NEXT:    retq # encoding: [0xc3]
  %r = tail call nnan <4 x half> @llvm.minnum.v4f16(<4 x half> %a, <4 x half> %b)
  ret <4 x half> %r
}

define half @minnum_intrinsic_nnan_attr_f16(half %a, half %b) #0 {
; CHECK-LABEL: minnum_intrinsic_nnan_attr_f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vminsh %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7e,0x08,0x5d,0xc1]
; CHECK-NEXT:    retq # encoding: [0xc3]
  %r = tail call half @llvm.minnum.f16(half %a, half %b)
  ret half %r
}
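
; When one operand is a constant that is known not to be NaN, the checks expect
; a single vminsh with the constant folded into a RIP-relative memory operand
; as the second source: a NaN in %x then makes the min return the constant,
; which already matches minnum semantics, so no vcmpunord fixup is required.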

define half @test_minnum_const_op1(half %x) {
; CHECK-LABEL: test_minnum_const_op1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vminsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7e,0x08,0x5d,0x05,A,A,A,A]
; CHECK-NEXT:    # fixup A - offset: 6, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT:    retq # encoding: [0xc3]
  %r = call half @llvm.minnum.f16(half 1.0, half %x)
  ret half %r
}

define half @test_minnum_const_op2(half %x) {
; CHECK-LABEL: test_minnum_const_op2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vminsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7e,0x08,0x5d,0x05,A,A,A,A]
; CHECK-NEXT:    # fixup A - offset: 6, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT:    retq # encoding: [0xc3]
  %r = call half @llvm.minnum.f16(half %x, half 1.0)
  ret half %r
}
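
; minnum with a known-NaN constant operand folds to the other operand, so the
; expected code is just the return.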

define half @test_minnum_const_nan(half %x) {
; CHECK-LABEL: test_minnum_const_nan:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq # encoding: [0xc3]
  %r = call half @llvm.minnum.f16(half %x, half 0x7fff000000000000)
  ret half %r
}

attributes #0 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" }