1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s
5 declare <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double>, <8 x double>, metadata, metadata)
6 declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadata, metadata)
7 declare <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double>, <8 x double>, metadata, metadata)
8 declare <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float>, <16 x float>, metadata, metadata)
9 declare <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double>, <8 x double>, metadata, metadata)
10 declare <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float>, <16 x float>, metadata, metadata)
11 declare <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double>, <8 x double>, metadata, metadata)
12 declare <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float>, <16 x float>, metadata, metadata)
13 declare <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double>, metadata, metadata)
14 declare <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float>, metadata, metadata)
15 declare <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f32(<8 x float>, metadata)
16 declare <8 x float> @llvm.experimental.constrained.fptrunc.v8f32.v8f64(<8 x double>, metadata, metadata)
17 declare <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, metadata, metadata)
18 declare <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, metadata, metadata)
19 declare <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float>, metadata)
20 declare <8 x double> @llvm.experimental.constrained.ceil.v8f64(<8 x double>, metadata)
21 declare <16 x float> @llvm.experimental.constrained.floor.v16f32(<16 x float>, metadata)
22 declare <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double>, metadata)
23 declare <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float>, metadata)
24 declare <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double>, metadata)
25 declare <16 x float> @llvm.experimental.constrained.rint.v16f32(<16 x float>, metadata, metadata)
26 declare <8 x double> @llvm.experimental.constrained.rint.v8f64(<8 x double>, metadata, metadata)
27 declare <16 x float> @llvm.experimental.constrained.nearbyint.v16f32(<16 x float>, metadata, metadata)
28 declare <8 x double> @llvm.experimental.constrained.nearbyint.v8f64(<8 x double>, metadata, metadata)
; Strict fadd on v8f64 must lower to a single vaddpd on a zmm register.
define <8 x double> @f1(<8 x double> %a, <8 x double> %b) #0 {
; CHECK-LABEL: f1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double> %a, <8 x double> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <8 x double> %ret
}
; Strict fadd on v16f32 must lower to a single vaddps on a zmm register.
define <16 x float> @f2(<16 x float> %a, <16 x float> %b) #0 {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %a, <16 x float> %b,
                                                                      metadata !"round.dynamic",
                                                                      metadata !"fpexcept.strict") #0
  ret <16 x float> %ret
}
; Strict fsub on v8f64 must lower to a single vsubpd on a zmm register.
define <8 x double> @f3(<8 x double> %a, <8 x double> %b) #0 {
; CHECK-LABEL: f3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsubpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double> %a, <8 x double> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <8 x double> %ret
}
; Strict fsub on v16f32 must lower to a single vsubps on a zmm register.
define <16 x float> @f4(<16 x float> %a, <16 x float> %b) #0 {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsubps %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float> %a, <16 x float> %b,
                                                                      metadata !"round.dynamic",
                                                                      metadata !"fpexcept.strict") #0
  ret <16 x float> %ret
}
; Strict fmul on v8f64 must lower to a single vmulpd on a zmm register.
define <8 x double> @f5(<8 x double> %a, <8 x double> %b) #0 {
; CHECK-LABEL: f5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmulpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double> %a, <8 x double> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <8 x double> %ret
}
; Strict fmul on v16f32 must lower to a single vmulps on a zmm register.
define <16 x float> @f6(<16 x float> %a, <16 x float> %b) #0 {
; CHECK-LABEL: f6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmulps %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float> %a, <16 x float> %b,
                                                                      metadata !"round.dynamic",
                                                                      metadata !"fpexcept.strict") #0
  ret <16 x float> %ret
}
; Strict fdiv on v8f64 must lower to a single vdivpd on a zmm register.
define <8 x double> @f7(<8 x double> %a, <8 x double> %b) #0 {
; CHECK-LABEL: f7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double> %a, <8 x double> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <8 x double> %ret
}
; Strict fdiv on v16f32 must lower to a single vdivps on a zmm register.
define <16 x float> @f8(<16 x float> %a, <16 x float> %b) #0 {
; CHECK-LABEL: f8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivps %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float> %a, <16 x float> %b,
                                                                      metadata !"round.dynamic",
                                                                      metadata !"fpexcept.strict") #0
  ret <16 x float> %ret
}
; Strict sqrt on v8f64 must lower to a single vsqrtpd on a zmm register.
define <8 x double> @f9(<8 x double> %a) #0 {
; CHECK-LABEL: f9:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsqrtpd %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x double> @llvm.experimental.constrained.sqrt.v8f64(
                              <8 x double> %a,
                              metadata !"round.dynamic",
                              metadata !"fpexcept.strict") #0
  ret <8 x double> %ret
}
; Strict sqrt on v16f32 must lower to a single vsqrtps on a zmm register.
define <16 x float> @f10(<16 x float> %a) #0 {
; CHECK-LABEL: f10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsqrtps %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x float> @llvm.experimental.constrained.sqrt.v16f32(
                              <16 x float> %a,
                              metadata !"round.dynamic",
                              metadata !"fpexcept.strict") #0
  ret <16 x float> %ret
}
; Strict fpext v8f32 -> v8f64 must lower to a single vcvtps2pd (ymm -> zmm).
define <8 x double> @f11(<8 x float> %a) #0 {
; CHECK-LABEL: f11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtps2pd %ymm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f32(
                              <8 x float> %a,
                              metadata !"fpexcept.strict") #0
  ret <8 x double> %ret
}
; Strict fptrunc v8f64 -> v8f32 must lower to a single vcvtpd2ps (zmm -> ymm).
define <8 x float> @f12(<8 x double> %a) #0 {
; CHECK-LABEL: f12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtpd2ps %zmm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x float> @llvm.experimental.constrained.fptrunc.v8f32.v8f64(
                              <8 x double> %a,
                              metadata !"round.dynamic",
                              metadata !"fpexcept.strict") #0
  ret <8 x float> %ret
}
; Strict fma on v16f32 must lower to a single vfmadd213ps on zmm registers.
define <16 x float> @f13(<16 x float> %a, <16 x float> %b, <16 x float> %c) #0 {
; CHECK-LABEL: f13:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vfmadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <16 x float> %res
}
; Strict fma on v8f64 must lower to a single vfmadd213pd on zmm registers.
define <8 x double> @f14(<8 x double> %a, <8 x double> %b, <8 x double> %c) #0 {
; CHECK-LABEL: f14:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vfmadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c,
                                                                    metadata !"round.dynamic",
                                                                    metadata !"fpexcept.strict") #0
  ret <8 x double> %res
}
; Strict ceil on v16f32 must lower to vrndscaleps with imm 10 (round up, suppress exceptions off masked).
define <16 x float> @strict_vector_fceil_v16f32(<16 x float> %f) #0 {
; CHECK-LABEL: strict_vector_fceil_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleps $10, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float> %f, metadata !"fpexcept.strict") #0
  ret <16 x float> %res
}
; Strict ceil on v8f64 must lower to vrndscalepd with imm 10 (round up).
define <8 x double> @strict_vector_fceil_v8f64(<8 x double> %f) #0 {
; CHECK-LABEL: strict_vector_fceil_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscalepd $10, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x double> @llvm.experimental.constrained.ceil.v8f64(<8 x double> %f, metadata !"fpexcept.strict") #0
  ret <8 x double> %res
}
; Strict floor on v16f32 must lower to vrndscaleps with imm 9 (round down).
define <16 x float> @strict_vector_ffloor_v16f32(<16 x float> %f) #0 {
; CHECK-LABEL: strict_vector_ffloor_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleps $9, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x float> @llvm.experimental.constrained.floor.v16f32(<16 x float> %f, metadata !"fpexcept.strict") #0
  ret <16 x float> %res
}
; Strict floor on v8f64 must lower to vrndscalepd with imm 9 (round down).
define <8 x double> @strict_vector_ffloor_v8f64(<8 x double> %f) #0 {
; CHECK-LABEL: strict_vector_ffloor_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscalepd $9, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double> %f, metadata !"fpexcept.strict") #0
  ret <8 x double> %res
}
; Strict trunc on v16f32 must lower to vrndscaleps with imm 11 (round toward zero).
define <16 x float> @strict_vector_ftrunc_v16f32(<16 x float> %f) #0 {
; CHECK-LABEL: strict_vector_ftrunc_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleps $11, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float> %f, metadata !"fpexcept.strict") #0
  ret <16 x float> %res
}
; Strict trunc on v8f64 must lower to vrndscalepd with imm 11 (round toward zero).
define <8 x double> @strict_vector_ftrunc_v8f64(<8 x double> %f) #0 {
; CHECK-LABEL: strict_vector_ftrunc_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscalepd $11, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double> %f, metadata !"fpexcept.strict") #0
  ret <8 x double> %res
}
; Strict rint on v16f32 must lower to vrndscaleps with imm 4 (current rounding mode, exceptions reported).
define <16 x float> @strict_vector_frint_v16f32(<16 x float> %f) #0 {
; CHECK-LABEL: strict_vector_frint_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleps $4, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x float> @llvm.experimental.constrained.rint.v16f32(<16 x float> %f,
                             metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <16 x float> %res
}
; Strict rint on v8f64 must lower to vrndscalepd with imm 4 (current rounding mode, exceptions reported).
define <8 x double> @strict_vector_frint_v8f64(<8 x double> %f) #0 {
; CHECK-LABEL: strict_vector_frint_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscalepd $4, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x double> @llvm.experimental.constrained.rint.v8f64(<8 x double> %f,
                             metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <8 x double> %res
}
; Strict nearbyint on v16f32 must lower to vrndscaleps with imm 12 (current mode, inexact suppressed).
define <16 x float> @strict_vector_fnearbyint_v16f32(<16 x float> %f) #0 {
; CHECK-LABEL: strict_vector_fnearbyint_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleps $12, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x float> @llvm.experimental.constrained.nearbyint.v16f32(<16 x float> %f,
                             metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <16 x float> %res
}
; Strict nearbyint on v8f64 must lower to vrndscalepd with imm 12 (current mode, inexact suppressed).
define <8 x double> @strict_vector_fnearbyint_v8f64(<8 x double> %f) #0 {
; CHECK-LABEL: strict_vector_fnearbyint_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscalepd $12, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x double> @llvm.experimental.constrained.nearbyint.v8f64(<8 x double> %f,
                             metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <8 x double> %res
}
283 attributes #0 = { strictfp }