; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512fp16 -mattr=+avx512vl -O3 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16 -mattr=+avx512vl -O3 | FileCheck %s
declare <16 x half> @llvm.experimental.constrained.fadd.v16f16(<16 x half>, <16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half>, <16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half>, <16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half>, <16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.sqrt.v16f16(<16 x half>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f16(<4 x half>, metadata)
declare <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half>, metadata)
declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double>, metadata, metadata)
declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(<8 x float>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.ceil.v16f16(<16 x half>, metadata)
declare <16 x half> @llvm.experimental.constrained.floor.v16f16(<16 x half>, metadata)
declare <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half>, metadata)
declare <16 x half> @llvm.experimental.constrained.rint.v16f16(<16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.nearbyint.v16f16(<16 x half>, metadata, metadata)
define <16 x half> @f2(<16 x half> %a, <16 x half> %b) #0 {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddph %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x half> @llvm.experimental.constrained.fadd.v16f16(<16 x half> %a, <16 x half> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <16 x half> %ret
}
define <16 x half> @f4(<16 x half> %a, <16 x half> %b) #0 {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsubph %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half> %a, <16 x half> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <16 x half> %ret
}
define <16 x half> @f6(<16 x half> %a, <16 x half> %b) #0 {
; CHECK-LABEL: f6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmulph %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half> %a, <16 x half> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <16 x half> %ret
}
define <16 x half> @f8(<16 x half> %a, <16 x half> %b) #0 {
; CHECK-LABEL: f8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivph %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half> %a, <16 x half> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <16 x half> %ret
}
define <16 x half> @f10(<16 x half> %a) #0 {
; CHECK-LABEL: f10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsqrtph %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x half> @llvm.experimental.constrained.sqrt.v16f16(
                              <16 x half> %a,
                              metadata !"round.dynamic",
                              metadata !"fpexcept.strict") #0
  ret <16 x half> %ret
}
define <4 x double> @f11(<4 x half> %a) #0 {
; CHECK-LABEL: f11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtph2pd %xmm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f16(
                              <4 x half> %a,
                              metadata !"fpexcept.strict") #0
  ret <4 x double> %ret
}
define <4 x half> @f12(<4 x double> %a) #0 {
; CHECK-LABEL: f12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtpd2ph %ymm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(
                              <4 x double> %a,
                              metadata !"round.dynamic",
                              metadata !"fpexcept.strict") #0
  ret <4 x half> %ret
}
define <8 x float> @f14(<8 x half> %a) #0 {
; CHECK-LABEL: f14:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtph2psx %xmm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(
                              <8 x half> %a,
                              metadata !"fpexcept.strict") #0
  ret <8 x float> %ret
}
define <8 x half> @f15(<8 x float> %a) #0 {
; CHECK-LABEL: f15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtps2phx %ymm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(
                              <8 x float> %a,
                              metadata !"round.dynamic",
                              metadata !"fpexcept.strict") #0
  ret <8 x half> %ret
}
define <16 x half> @fceilv16f16(<16 x half> %f) #0 {
; CHECK-LABEL: fceilv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleph $10, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x half> @llvm.experimental.constrained.ceil.v16f16(
                          <16 x half> %f, metadata !"fpexcept.strict") #0
  ret <16 x half> %res
}
define <16 x half> @ffloorv16f16(<16 x half> %f) #0 {
; CHECK-LABEL: ffloorv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleph $9, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x half> @llvm.experimental.constrained.floor.v16f16(
                          <16 x half> %f, metadata !"fpexcept.strict") #0
  ret <16 x half> %res
}
define <16 x half> @ftruncv16f16(<16 x half> %f) #0 {
; CHECK-LABEL: ftruncv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleph $11, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x half> @llvm.experimental.constrained.trunc.v16f16(
                          <16 x half> %f, metadata !"fpexcept.strict") #0
  ret <16 x half> %res
}
define <16 x half> @frintv16f16(<16 x half> %f) #0 {
; CHECK-LABEL: frintv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleph $4, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x half> @llvm.experimental.constrained.rint.v16f16(
                          <16 x half> %f,
                          metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <16 x half> %res
}
define <16 x half> @fnearbyintv16f16(<16 x half> %f) #0 {
; CHECK-LABEL: fnearbyintv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleph $12, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x half> @llvm.experimental.constrained.nearbyint.v16f16(
                          <16 x half> %f,
                          metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <16 x half> %res
}
attributes #0 = { strictfp }