; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
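
; There is no single roundeven instruction in RVV, so each
; @llvm.vp.roundeven.* call below is expected to lower to: vfabs/vmflt to
; build a mask of lanes whose magnitude is below the integer-rounding
; threshold (lanes that may still carry a fraction), a masked round trip
; through the integer domain (vfcvt.x.f.v / vfcvt.f.x.v) with the dynamic
; rounding mode temporarily set to round-to-nearest-even (fsrmi/fsrm, mode 0),
; and a final vfsgnj.vv to restore the original sign. With Zvfhmin the f16
; cases are first widened to f32 and narrowed back with vfncvt at the end.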
declare <vscale x 1 x half> @llvm.vp.roundeven.nxv1f16(<vscale x 1 x half>, <vscale x 1 x i1>, i32)

define <vscale x 1 x half> @vp_roundeven_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv1f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    lui a1, %hi(.LCPI0_0)
; ZVFH-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; ZVFH-NEXT:    fsrmi a0, 0
; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT:    fsrm a0
; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vp_roundeven_nxv1f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfabs.v v8, v9, v0.t
; ZVFHMIN-NEXT:    lui a0, 307200
; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    fsrmi a0, 0
; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
; ZVFHMIN-NEXT:    fsrm a0
; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 1 x half> @llvm.vp.roundeven.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x half> %v
}

define <vscale x 1 x half> @vp_roundeven_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv1f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    lui a1, %hi(.LCPI1_0)
; ZVFH-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfabs.v v9, v8
; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
; ZVFH-NEXT:    fsrmi a0, 0
; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT:    fsrm a0
; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vp_roundeven_nxv1f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfabs.v v8, v9
; ZVFHMIN-NEXT:    lui a0, 307200
; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
; ZVFHMIN-NEXT:    fsrmi a0, 0
; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
; ZVFHMIN-NEXT:    fsrm a0
; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x half> @llvm.vp.roundeven.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x half> %v
}

declare <vscale x 2 x half> @llvm.vp.roundeven.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vp_roundeven_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    lui a1, %hi(.LCPI2_0)
; ZVFH-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFH-NEXT:    fsrmi a0, 0
; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT:    fsrm a0
; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vp_roundeven_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfabs.v v8, v9, v0.t
; ZVFHMIN-NEXT:    lui a0, 307200
; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    fsrmi a0, 0
; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
; ZVFHMIN-NEXT:    fsrm a0
; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.roundeven.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %v
}

define <vscale x 2 x half> @vp_roundeven_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    lui a1, %hi(.LCPI3_0)
; ZVFH-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfabs.v v9, v8
; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
; ZVFH-NEXT:    fsrmi a0, 0
; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT:    fsrm a0
; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vp_roundeven_nxv2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfabs.v v8, v9
; ZVFHMIN-NEXT:    lui a0, 307200
; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
; ZVFHMIN-NEXT:    fsrmi a0, 0
; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
; ZVFHMIN-NEXT:    fsrm a0
; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x half> @llvm.vp.roundeven.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %v
}

declare <vscale x 4 x half> @llvm.vp.roundeven.nxv4f16(<vscale x 4 x half>, <vscale x 4 x i1>, i32)

define <vscale x 4 x half> @vp_roundeven_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    lui a1, %hi(.LCPI4_0)
; ZVFH-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; ZVFH-NEXT:    fsrmi a0, 0
; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT:    fsrm a0
; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vp_roundeven_nxv4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vmv1r.v v9, v0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfabs.v v12, v10, v0.t
; ZVFHMIN-NEXT:    lui a0, 307200
; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT:    vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT:    fsrmi a0, 0
; ZVFHMIN-NEXT:    vmv1r.v v0, v9
; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT:    fsrm a0
; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT:    vfsgnj.vv v10, v12, v10, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 4 x half> @llvm.vp.roundeven.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x half> %v
}

define <vscale x 4 x half> @vp_roundeven_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv4f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    lui a1, %hi(.LCPI5_0)
; ZVFH-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfabs.v v9, v8
; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
; ZVFH-NEXT:    fsrmi a0, 0
; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT:    fsrm a0
; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vp_roundeven_nxv4f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfabs.v v8, v10
; ZVFHMIN-NEXT:    lui a0, 307200
; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
; ZVFHMIN-NEXT:    fsrmi a0, 0
; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
; ZVFHMIN-NEXT:    fsrm a0
; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x half> @llvm.vp.roundeven.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x half> %v
}

declare <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, i32)

define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vmv1r.v v10, v0
; ZVFH-NEXT:    lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfabs.v v12, v8, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT:    vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT:    fsrmi a0, 0
; ZVFH-NEXT:    vmv1r.v v0, v10
; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT:    fsrm a0
; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vp_roundeven_nxv8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vmv1r.v v10, v0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfabs.v v16, v12, v0.t
; ZVFHMIN-NEXT:    lui a0, 307200
; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT:    vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT:    fsrmi a0, 0
; ZVFHMIN-NEXT:    vmv1r.v v0, v10
; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT:    fsrm a0
; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT:    vfsgnj.vv v12, v16, v12, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x half> %v
}

define <vscale x 8 x half> @vp_roundeven_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv8f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    lui a1, %hi(.LCPI7_0)
; ZVFH-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfabs.v v10, v8
; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
; ZVFH-NEXT:    fsrmi a0, 0
; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
; ZVFH-NEXT:    fsrm a0
; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vp_roundeven_nxv8f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfabs.v v8, v12
; ZVFHMIN-NEXT:    lui a0, 307200
; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
; ZVFHMIN-NEXT:    fsrmi a0, 0
; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v12, v0.t
; ZVFHMIN-NEXT:    fsrm a0
; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x half> %v
}

declare <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half>, <vscale x 16 x i1>, i32)

define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vmv1r.v v12, v0
; ZVFH-NEXT:    lui a1, %hi(.LCPI8_0)
; ZVFH-NEXT:    flh fa5, %lo(.LCPI8_0)(a1)
; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT:    vfabs.v v16, v8, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT:    vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT:    fsrmi a0, 0
; ZVFH-NEXT:    vmv1r.v v0, v12
; ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT:    fsrm a0
; ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vp_roundeven_nxv16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vmv1r.v v12, v0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
; ZVFHMIN-NEXT:    lui a0, 307200
; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT:    vmflt.vf v12, v24, fa5, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    fsrmi a0, 0
; ZVFHMIN-NEXT:    vmv1r.v v0, v12
; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
; ZVFHMIN-NEXT:    fsrm a0
; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x half> %v
}

define <vscale x 16 x half> @vp_roundeven_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv16f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    lui a1, %hi(.LCPI9_0)
; ZVFH-NEXT:    flh fa5, %lo(.LCPI9_0)(a1)
; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT:    vfabs.v v12, v8
; ZVFH-NEXT:    vmflt.vf v0, v12, fa5
; ZVFH-NEXT:    fsrmi a0, 0
; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT:    fsrm a0
; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vp_roundeven_nxv16f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfabs.v v8, v16
; ZVFHMIN-NEXT:    lui a0, 307200
; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
; ZVFHMIN-NEXT:    fsrmi a0, 0
; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v16, v0.t
; ZVFHMIN-NEXT:    fsrm a0
; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x half> %v
}

declare <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half>, <vscale x 32 x i1>, i32)

define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv32f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vmv1r.v v16, v0
; ZVFH-NEXT:    lui a1, %hi(.LCPI10_0)
; ZVFH-NEXT:    flh fa5, %lo(.LCPI10_0)(a1)
; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT:    vfabs.v v24, v8, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT:    vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT:    fsrmi a0, 0
; ZVFH-NEXT:    vmv1r.v v0, v16
; ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
; ZVFH-NEXT:    fsrm a0
; ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vp_roundeven_nxv32f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    addi sp, sp, -16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT:    csrr a1, vlenb
; ZVFHMIN-NEXT:    slli a1, a1, 3
; ZVFHMIN-NEXT:    sub sp, sp, a1
; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT:    vmv1r.v v16, v0
; ZVFHMIN-NEXT:    csrr a2, vlenb
; ZVFHMIN-NEXT:    slli a1, a2, 1
; ZVFHMIN-NEXT:    sub a3, a0, a1
; ZVFHMIN-NEXT:    sltu a4, a0, a3
; ZVFHMIN-NEXT:    addi a4, a4, -1
; ZVFHMIN-NEXT:    and a3, a4, a3
; ZVFHMIN-NEXT:    srli a2, a2, 2
; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vx v17, v0, a2
; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    addi a2, sp, 16
; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vmv1r.v v0, v17
; ZVFHMIN-NEXT:    vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT:    lui a2, 307200
; ZVFHMIN-NEXT:    fmv.w.x fa5, a2
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT:    vmflt.vf v17, v8, fa5, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    fsrmi a2, 0
; ZVFHMIN-NEXT:    vmv1r.v v0, v17
; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT:    fsrm a2
; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT:    vfsgnj.vv v24, v8, v24, v0.t
; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v20, v24
; ZVFHMIN-NEXT:    bltu a0, a1, .LBB10_2
; ZVFHMIN-NEXT:  # %bb.1:
; ZVFHMIN-NEXT:    mv a0, a1
; ZVFHMIN-NEXT:  .LBB10_2:
; ZVFHMIN-NEXT:    addi a1, sp, 16
; ZVFHMIN-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vmv1r.v v0, v16
; ZVFHMIN-NEXT:    vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT:    vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    fsrmi a0, 0
; ZVFHMIN-NEXT:    vmv1r.v v0, v16
; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT:    fsrm a0
; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT:    vfsgnj.vv v24, v8, v24, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v16, v24
; ZVFHMIN-NEXT:    vmv8r.v v8, v16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 3
; ZVFHMIN-NEXT:    add sp, sp, a0
; ZVFHMIN-NEXT:    addi sp, sp, 16
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x half> %v
}

define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv32f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    lui a1, %hi(.LCPI11_0)
; ZVFH-NEXT:    flh fa5, %lo(.LCPI11_0)(a1)
; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT:    vfabs.v v16, v8
; ZVFH-NEXT:    vmflt.vf v0, v16, fa5
; ZVFH-NEXT:    fsrmi a0, 0
; ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT:    fsrm a0
; ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vp_roundeven_nxv32f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    addi sp, sp, -16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT:    csrr a1, vlenb
; ZVFHMIN-NEXT:    slli a1, a1, 3
; ZVFHMIN-NEXT:    sub sp, sp, a1
; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; ZVFHMIN-NEXT:    vmset.m v16
; ZVFHMIN-NEXT:    csrr a2, vlenb
; ZVFHMIN-NEXT:    slli a1, a2, 1
; ZVFHMIN-NEXT:    sub a3, a0, a1
; ZVFHMIN-NEXT:    sltu a4, a0, a3
; ZVFHMIN-NEXT:    addi a4, a4, -1
; ZVFHMIN-NEXT:    and a3, a4, a3
; ZVFHMIN-NEXT:    srli a2, a2, 2
; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vx v16, v16, a2
; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    addi a2, sp, 16
; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vmv1r.v v0, v16
; ZVFHMIN-NEXT:    vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT:    lui a2, 307200
; ZVFHMIN-NEXT:    fmv.w.x fa5, a2
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT:    vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    fsrmi a2, 0
; ZVFHMIN-NEXT:    vmv1r.v v0, v16
; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT:    fsrm a2
; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT:    vfsgnj.vv v24, v8, v24, v0.t
; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT:    bltu a0, a1, .LBB11_2
; ZVFHMIN-NEXT:  # %bb.1:
; ZVFHMIN-NEXT:    mv a0, a1
; ZVFHMIN-NEXT:  .LBB11_2:
; ZVFHMIN-NEXT:    addi a1, sp, 16
; ZVFHMIN-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfabs.v v24, v16
; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
; ZVFHMIN-NEXT:    fsrmi a0, 0
; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
; ZVFHMIN-NEXT:    fsrm a0
; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 3
; ZVFHMIN-NEXT:    add sp, sp, a0
; ZVFHMIN-NEXT:    addi sp, sp, 16
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x half> %v
}

declare <vscale x 1 x float> @llvm.vp.roundeven.nxv1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)

define <vscale x 1 x float> @vp_roundeven_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfabs.v v9, v8, v0.t
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x float> @llvm.vp.roundeven.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x float> %v
}

define <vscale x 1 x float> @vp_roundeven_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv1f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x float> @llvm.vp.roundeven.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x float> %v
}

declare <vscale x 2 x float> @llvm.vp.roundeven.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @vp_roundeven_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfabs.v v9, v8, v0.t
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.vp.roundeven.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @vp_roundeven_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x float> @llvm.vp.roundeven.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %v
}

declare <vscale x 4 x float> @llvm.vp.roundeven.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, i32)

define <vscale x 4 x float> @vp_roundeven_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfabs.v v12, v8, v0.t
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x float> @llvm.vp.roundeven.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x float> %v
}

define <vscale x 4 x float> @vp_roundeven_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfabs.v v10, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vmflt.vf v0, v10, fa5
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x float> @llvm.vp.roundeven.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x float> %v
}

declare <vscale x 8 x float> @llvm.vp.roundeven.nxv8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)

define <vscale x 8 x float> @vp_roundeven_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v12, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfabs.v v16, v8, v0.t
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x float> @llvm.vp.roundeven.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x float> %v
}

define <vscale x 8 x float> @vp_roundeven_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfabs.v v12, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vmflt.vf v0, v12, fa5
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x float> @llvm.vp.roundeven.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x float> %v
}

declare <vscale x 16 x float> @llvm.vp.roundeven.nxv16f32(<vscale x 16 x float>, <vscale x 16 x i1>, i32)

define <vscale x 16 x float> @vp_roundeven_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v16, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfabs.v v24, v8, v0.t
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x float> @llvm.vp.roundeven.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x float> %v
}

define <vscale x 16 x float> @vp_roundeven_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv16f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfabs.v v16, v8
; CHECK-NEXT:    lui a0, 307200
; CHECK-NEXT:    fmv.w.x fa5, a0
; CHECK-NEXT:    vmflt.vf v0, v16, fa5
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x float> @llvm.vp.roundeven.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x float> %v
}

declare <vscale x 1 x double> @llvm.vp.roundeven.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)

define <vscale x 1 x double> @vp_roundeven_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfabs.v v9, v8, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x double> @llvm.vp.roundeven.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x double> %v
}

define <vscale x 1 x double> @vp_roundeven_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv1f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI23_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfabs.v v9, v8
; CHECK-NEXT:    vmflt.vf v0, v9, fa5
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x double> @llvm.vp.roundeven.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x double> %v
}

declare <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x double> @vp_roundeven_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfabs.v v12, v8, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %v
}

define <vscale x 2 x double> @vp_roundeven_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI25_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfabs.v v10, v8
; CHECK-NEXT:    vmflt.vf v0, v10, fa5
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %v
}

declare <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)

define <vscale x 4 x double> @vp_roundeven_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v12, v0
; CHECK-NEXT:    lui a1, %hi(.LCPI26_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfabs.v v16, v8, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x double> %v
}

define <vscale x 4 x double> @vp_roundeven_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI27_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI27_0)(a1)
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfabs.v v12, v8
; CHECK-NEXT:    vmflt.vf v0, v12, fa5
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x double> %v
}

declare <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double>, <vscale x 7 x i1>, i32)

define <vscale x 7 x double> @vp_roundeven_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv7f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v16, v0
; CHECK-NEXT:    lui a1, %hi(.LCPI28_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI28_0)(a1)
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v24, v8, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
  ret <vscale x 7 x double> %v
}

define <vscale x 7 x double> @vp_roundeven_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv7f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI29_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI29_0)(a1)
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v16, v8
; CHECK-NEXT:    vmflt.vf v0, v16, fa5
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
  %v = call <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
  ret <vscale x 7 x double> %v
}

declare <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)

define <vscale x 8 x double> @vp_roundeven_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v16, v0
; CHECK-NEXT:    lui a1, %hi(.LCPI30_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI30_0)(a1)
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v24, v8, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @vp_roundeven_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI31_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI31_0)(a1)
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v16, v8
; CHECK-NEXT:    vmflt.vf v0, v16, fa5
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x double> %v
}

declare <vscale x 16 x double> @llvm.vp.roundeven.nxv16f64(<vscale x 16 x double>, <vscale x 16 x i1>, i32)

define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 4
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 3
; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v25, v0, a2
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    lui a3, %hi(.LCPI32_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI32_0)(a3)
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vfabs.v v8, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vmflt.vf v25, v8, fa5, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    fsrmi a2, 0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
; CHECK-NEXT:    fsrm a2
; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    add a2, sp, a2
; CHECK-NEXT:    addi a2, a2, 16
; CHECK-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT:    bltu a0, a1, .LBB32_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB32_2:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vfabs.v v16, v8, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vmflt.vf v24, v16, fa5, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x double> @llvm.vp.roundeven.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x double> %v
}

define <vscale x 16 x double> @vp_roundeven_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv16f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    lui a3, %hi(.LCPI33_0)
; CHECK-NEXT:    fld fa5, %lo(.LCPI33_0)(a3)
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v24, v16
; CHECK-NEXT:    vmflt.vf v0, v24, fa5
; CHECK-NEXT:    fsrmi a2, 0
; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
; CHECK-NEXT:    fsrm a2
; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB33_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB33_2:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v24, v8
; CHECK-NEXT:    vmflt.vf v0, v24, fa5
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x double> @llvm.vp.roundeven.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x double> %v