; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+fma -O3 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+fma -O3 | FileCheck %s
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s
; Declarations for the 256-bit constrained FP intrinsics exercised below.
declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.fadd.v8f32(<8 x float>, <8 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float>, <8 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.fmul.v8f32(<8 x float>, <8 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float>, <8 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.sqrt.v4f64(<4 x double>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.sqrt.v8f32(<8 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.ceil.v8f32(<8 x float>, metadata)
declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata)
declare <8 x float> @llvm.experimental.constrained.floor.v8f32(<8 x float>, metadata)
declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata)
declare <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float>, metadata)
declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
declare <8 x float> @llvm.experimental.constrained.rint.v8f32(<8 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.rint.v4f64(<4 x double>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.nearbyint.v8f32(<8 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double>, metadata, metadata)
; Constrained fadd on <4 x double> should select a single vaddpd.
define <4 x double> @f1(<4 x double> %a, <4 x double> %b) #0 {
; CHECK-LABEL: f1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double> %a, <4 x double> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <4 x double> %ret
}
; Constrained fadd on <8 x float> should select a single vaddps.
define <8 x float> @f2(<8 x float> %a, <8 x float> %b) #0 {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x float> @llvm.experimental.constrained.fadd.v8f32(<8 x float> %a, <8 x float> %b,
                                                                    metadata !"round.dynamic",
                                                                    metadata !"fpexcept.strict") #0
  ret <8 x float> %ret
}
; Constrained fsub on <4 x double> should select a single vsubpd.
define <4 x double> @f3(<4 x double> %a, <4 x double> %b) #0 {
; CHECK-LABEL: f3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsubpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double> %a, <4 x double> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <4 x double> %ret
}
; Constrained fsub on <8 x float> should select a single vsubps.
define <8 x float> @f4(<8 x float> %a, <8 x float> %b) #0 {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsubps %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float> %a, <8 x float> %b,
                                                                    metadata !"round.dynamic",
                                                                    metadata !"fpexcept.strict") #0
  ret <8 x float> %ret
}
; Constrained fmul on <4 x double> should select a single vmulpd.
define <4 x double> @f5(<4 x double> %a, <4 x double> %b) #0 {
; CHECK-LABEL: f5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double> %a, <4 x double> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <4 x double> %ret
}
; Constrained fmul on <8 x float> should select a single vmulps.
define <8 x float> @f6(<8 x float> %a, <8 x float> %b) #0 {
; CHECK-LABEL: f6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmulps %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x float> @llvm.experimental.constrained.fmul.v8f32(<8 x float> %a, <8 x float> %b,
                                                                    metadata !"round.dynamic",
                                                                    metadata !"fpexcept.strict") #0
  ret <8 x float> %ret
}
; Constrained fdiv on <4 x double> should select a single vdivpd.
define <4 x double> @f7(<4 x double> %a, <4 x double> %b) #0 {
; CHECK-LABEL: f7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double> %a, <4 x double> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <4 x double> %ret
}
; Constrained fdiv on <8 x float> should select a single vdivps.
define <8 x float> @f8(<8 x float> %a, <8 x float> %b) #0 {
; CHECK-LABEL: f8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivps %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float> %a, <8 x float> %b,
                                                                    metadata !"round.dynamic",
                                                                    metadata !"fpexcept.strict") #0
  ret <8 x float> %ret
}
; Constrained sqrt on <4 x double> should select a single vsqrtpd.
define <4 x double> @f9(<4 x double> %a) #0 {
; CHECK-LABEL: f9:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsqrtpd %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <4 x double> @llvm.experimental.constrained.sqrt.v4f64(
                              <4 x double> %a,
                              metadata !"round.dynamic",
                              metadata !"fpexcept.strict") #0
  ret <4 x double> %ret
}
; Constrained sqrt on <8 x float> should select a single vsqrtps.
define <8 x float> @f10(<8 x float> %a) #0 {
; CHECK-LABEL: f10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsqrtps %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x float> @llvm.experimental.constrained.sqrt.v8f32(
                              <8 x float> %a,
                              metadata !"round.dynamic",
                              metadata !"fpexcept.strict") #0
  ret <8 x float > %ret
}
; Constrained fpext <4 x float> -> <4 x double> should select vcvtps2pd.
define <4 x double> @f11(<4 x float> %a) #0 {
; CHECK-LABEL: f11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtps2pd %xmm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(
                              <4 x float> %a,
                              metadata !"fpexcept.strict") #0
  ret <4 x double> %ret
}
; Constrained fptrunc <4 x double> -> <4 x float> should select vcvtpd2ps
; (plus vzeroupper before returning, since the result is only xmm-sized).
define <4 x float> @f12(<4 x double> %a) #0 {
; CHECK-LABEL: f12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtpd2ps %ymm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(
                              <4 x double> %a,
                              metadata !"round.dynamic",
                              metadata !"fpexcept.strict") #0
  ret <4 x float> %ret
}
; Constrained fma on <8 x float> should select a single vfmadd213ps.
define <8 x float> @f13(<8 x float> %a, <8 x float> %b, <8 x float> %c) #0 {
; CHECK-LABEL: f13:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c,
                                                                   metadata !"round.dynamic",
                                                                   metadata !"fpexcept.strict") #0
  ret <8 x float> %res
}
; Constrained fma on <4 x double> should select a single vfmadd213pd.
define <4 x double> @f14(<4 x double> %a, <4 x double> %b, <4 x double> %c) #0 {
; CHECK-LABEL: f14:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c,
                                                                    metadata !"round.dynamic",
                                                                    metadata !"fpexcept.strict") #0
  ret <4 x double> %res
}
; Constrained ceil on <8 x float> should select vroundps with imm $10
; (round-toward-positive, suppress precision exception).
define <8 x float> @fceilv8f32(<8 x float> %f) #0 {
; CHECK-LABEL: fceilv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vroundps $10, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x float> @llvm.experimental.constrained.ceil.v8f32(
                            <8 x float> %f, metadata !"fpexcept.strict") #0
  ret <8 x float> %res
}
; Constrained ceil on <4 x double> should select vroundpd with imm $10.
define <4 x double> @fceilv4f64(<4 x double> %f) #0 {
; CHECK-LABEL: fceilv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vroundpd $10, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <4 x double> @llvm.experimental.constrained.ceil.v4f64(
                            <4 x double> %f, metadata !"fpexcept.strict") #0
  ret <4 x double> %res
}
; Constrained floor on <8 x float> should select vroundps with imm $9
; (round-toward-negative, suppress precision exception).
define <8 x float> @ffloorv8f32(<8 x float> %f) #0 {
; CHECK-LABEL: ffloorv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vroundps $9, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x float> @llvm.experimental.constrained.floor.v8f32(
                            <8 x float> %f, metadata !"fpexcept.strict") #0
  ret <8 x float> %res
}
; Constrained floor on <4 x double> should select vroundpd with imm $9.
define <4 x double> @ffloorv4f64(<4 x double> %f) #0 {
; CHECK-LABEL: ffloorv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vroundpd $9, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <4 x double> @llvm.experimental.constrained.floor.v4f64(
                            <4 x double> %f, metadata !"fpexcept.strict") #0
  ret <4 x double> %res
}
; Constrained trunc on <8 x float> should select vroundps with imm $11
; (round-toward-zero, suppress precision exception).
define <8 x float> @ftruncv8f32(<8 x float> %f) #0 {
; CHECK-LABEL: ftruncv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vroundps $11, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x float> @llvm.experimental.constrained.trunc.v8f32(
                            <8 x float> %f, metadata !"fpexcept.strict") #0
  ret <8 x float> %res
}
; Constrained trunc on <4 x double> should select vroundpd with imm $11.
define <4 x double> @ftruncv4f64(<4 x double> %f) #0 {
; CHECK-LABEL: ftruncv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vroundpd $11, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <4 x double> @llvm.experimental.constrained.trunc.v4f64(
                            <4 x double> %f, metadata !"fpexcept.strict") #0
  ret <4 x double> %res
}
; Constrained rint on <8 x float> should select vroundps with imm $4
; (use MXCSR rounding mode, precision exception enabled).
define <8 x float> @frintv8f32(<8 x float> %f) #0 {
; CHECK-LABEL: frintv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vroundps $4, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x float> @llvm.experimental.constrained.rint.v8f32(
                            <8 x float> %f,
                            metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <8 x float> %res
}
; Constrained rint on <4 x double> should select vroundpd with imm $4.
define <4 x double> @frintv4f64(<4 x double> %f) #0 {
; CHECK-LABEL: frintv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vroundpd $4, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <4 x double> @llvm.experimental.constrained.rint.v4f64(
                            <4 x double> %f,
                            metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <4 x double> %res
}
; Constrained nearbyint on <8 x float> should select vroundps with imm $12
; (use MXCSR rounding mode, suppress precision exception).
define <8 x float> @fnearbyintv8f32(<8 x float> %f) #0 {
; CHECK-LABEL: fnearbyintv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vroundps $12, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x float> @llvm.experimental.constrained.nearbyint.v8f32(
                            <8 x float> %f,
                            metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <8 x float> %res
}
; Constrained nearbyint on <4 x double> should select vroundpd with imm $12.
define <4 x double> @fnearbyintv4f64(<4 x double> %f) #0 {
; CHECK-LABEL: fnearbyintv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vroundpd $12, %ymm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(
                            <4 x double> %f,
                            metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <4 x double> %res
}
; strictfp is required on both the functions and the constrained intrinsic
; calls so the optimizer preserves FP environment side effects.
attributes #0 = { strictfp }