; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s

; This file tests the code generation for `llvm.experimental.constrained.round.*` on scalable vector types.

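; A rough sketch of the lowering strategy that the CHECK lines below capture;
; the register names, LMUL, and constant-pool labels in this sketch are
; illustrative placeholders, not authoritative:
;   vmfne.vv    v0, vx, vx        ; mask of NaN lanes
;   vfadd.vv    vx, vx, vx, v0.t  ; quiet signaling NaNs (raises Invalid under strictfp)
;   vfabs.v     vt, vx            ; |x|
;   vmflt.vf    v0, vt, thresh    ; only lanes with |x| < 2^(mantissa bits) still need rounding
;   fsrmi       a0, 4             ; save frm and switch to RMM (nearest, ties to max magnitude)
;   vfcvt.x.f.v vt, vx, v0.t      ; masked round trip through the integer domain
;   fsrm        a0                ; restore the previous rounding mode
;   vfcvt.f.x.v vt, vt, v0.t
;   vfsgnj.vv   vx, vt, vx, v0.t  ; reapply the original sign so -0.0 survives
; Here `vx`, `vt`, and `thresh` stand for whatever registers and threshold
; constant register allocation and the constant pool actually produce.
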
define <vscale x 1 x half> @round_nxv1f16(<vscale x 1 x half> %x) strictfp {
; CHECK-LABEL: round_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x half> @llvm.experimental.constrained.round.nxv1f16(<vscale x 1 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.experimental.constrained.round.nxv1f16(<vscale x 1 x half>, metadata)

define <vscale x 2 x half> @round_nxv2f16(<vscale x 2 x half> %x) strictfp {
; CHECK-LABEL: round_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x half> @llvm.experimental.constrained.round.nxv2f16(<vscale x 2 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.experimental.constrained.round.nxv2f16(<vscale x 2 x half>, metadata)

define <vscale x 4 x half> @round_nxv4f16(<vscale x 4 x half> %x) strictfp {
; CHECK-LABEL: round_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x half> @llvm.experimental.constrained.round.nxv4f16(<vscale x 4 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.experimental.constrained.round.nxv4f16(<vscale x 4 x half>, metadata)

define <vscale x 8 x half> @round_nxv8f16(<vscale x 8 x half> %x) strictfp {
; CHECK-LABEL: round_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v10, v8
; CHECK-NEXT:    vmflt.vf v0, v10, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x half> @llvm.experimental.constrained.round.nxv8f16(<vscale x 8 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.experimental.constrained.round.nxv8f16(<vscale x 8 x half>, metadata)

define <vscale x 16 x half> @round_nxv16f16(<vscale x 16 x half> %x) strictfp {
; CHECK-LABEL: round_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v12, v8
; CHECK-NEXT:    vmflt.vf v0, v12, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 16 x half> @llvm.experimental.constrained.round.nxv16f16(<vscale x 16 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.experimental.constrained.round.nxv16f16(<vscale x 16 x half>, metadata)

define <vscale x 32 x half> @round_nxv32f16(<vscale x 32 x half> %x) strictfp {
; CHECK-LABEL: round_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v16, v8
; CHECK-NEXT:    vmflt.vf v0, v16, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 32 x half> @llvm.experimental.constrained.round.nxv32f16(<vscale x 32 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %a
}
declare <vscale x 32 x half> @llvm.experimental.constrained.round.nxv32f16(<vscale x 32 x half>, metadata)

define <vscale x 1 x float> @round_nxv1f32(<vscale x 1 x float> %x) strictfp {
; CHECK-LABEL: round_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x float> @llvm.experimental.constrained.round.nxv1f32(<vscale x 1 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.experimental.constrained.round.nxv1f32(<vscale x 1 x float>, metadata)

define <vscale x 2 x float> @round_nxv2f32(<vscale x 2 x float> %x) strictfp {
; CHECK-LABEL: round_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x float> @llvm.experimental.constrained.round.nxv2f32(<vscale x 2 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.experimental.constrained.round.nxv2f32(<vscale x 2 x float>, metadata)

define <vscale x 4 x float> @round_nxv4f32(<vscale x 4 x float> %x) strictfp {
; CHECK-LABEL: round_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v10, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vmflt.vf v0, v10, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x float> @llvm.experimental.constrained.round.nxv4f32(<vscale x 4 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.experimental.constrained.round.nxv4f32(<vscale x 4 x float>, metadata)

define <vscale x 8 x float> @round_nxv8f32(<vscale x 8 x float> %x) strictfp {
; CHECK-LABEL: round_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v12, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vmflt.vf v0, v12, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x float> @llvm.experimental.constrained.round.nxv8f32(<vscale x 8 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.experimental.constrained.round.nxv8f32(<vscale x 8 x float>, metadata)

define <vscale x 16 x float> @round_nxv16f32(<vscale x 16 x float> %x) strictfp {
; CHECK-LABEL: round_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v16, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vmflt.vf v0, v16, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 16 x float> @llvm.experimental.constrained.round.nxv16f32(<vscale x 16 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.experimental.constrained.round.nxv16f32(<vscale x 16 x float>, metadata)

define <vscale x 1 x double> @round_nxv1f64(<vscale x 1 x double> %x) strictfp {
; CHECK-LABEL: round_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.experimental.constrained.round.nxv1f64(<vscale x 1 x double> %x, metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.experimental.constrained.round.nxv1f64(<vscale x 1 x double>, metadata)

define <vscale x 2 x double> @round_nxv2f64(<vscale x 2 x double> %x) strictfp {
; CHECK-LABEL: round_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v10, v8
; CHECK-NEXT:    vmflt.vf v0, v10, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x double> @llvm.experimental.constrained.round.nxv2f64(<vscale x 2 x double> %x, metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.experimental.constrained.round.nxv2f64(<vscale x 2 x double>, metadata)

define <vscale x 4 x double> @round_nxv4f64(<vscale x 4 x double> %x) strictfp {
; CHECK-LABEL: round_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v12, v8
; CHECK-NEXT:    vmflt.vf v0, v12, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.experimental.constrained.round.nxv4f64(<vscale x 4 x double> %x, metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.experimental.constrained.round.nxv4f64(<vscale x 4 x double>, metadata)

define <vscale x 8 x double> @round_nxv8f64(<vscale x 8 x double> %x) strictfp {
; CHECK-LABEL: round_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI14_0)(a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
; CHECK-NEXT:    vfabs.v v16, v8
; CHECK-NEXT:    vmflt.vf v0, v16, fa5
; CHECK-NEXT:    fsrmi a0, 4
; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x double> @llvm.experimental.constrained.round.nxv8f64(<vscale x 8 x double> %x, metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.experimental.constrained.round.nxv8f64(<vscale x 8 x double>, metadata)