; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
define <vscale x 2 x i7> @vfptosi_v4i7_v4bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_v4i7_v4bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i7> @llvm.vp.fptosi.v4i7.v4bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i7> %v
}
define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i8_nxv2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}
define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i8_nxv2bf16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}
define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i16_nxv2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}
define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i16_nxv2bf16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}
define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i32_nxv2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}
define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i32_nxv2bf16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}
define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i64_nxv2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}
define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i64_nxv2bf16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}
declare <vscale x 2 x i7> @llvm.vp.fptosi.v4i7.v4f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i7> @vfptosi_v4i7_v4f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfptosi_v4i7_v4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
; ZVFH-NEXT:    vmv1r.v v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptosi_v4i7_v4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i7> @llvm.vp.fptosi.v4i7.v4f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i7> %v
}
declare <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfptosi_nxv2i8_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
; ZVFH-NEXT:    vmv1r.v v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptosi_nxv2i8_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}
define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfptosi_nxv2i8_nxv2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFH-NEXT:    vmv1r.v v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptosi_nxv2i8_nxv2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}
declare <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfptosi_nxv2i16_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptosi_nxv2i16_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}
define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfptosi_nxv2i16_nxv2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptosi_nxv2i16_nxv2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}
declare <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfptosi_nxv2i32_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v9, v8, v0.t
; ZVFH-NEXT:    vmv1r.v v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptosi_nxv2i32_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}
define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfptosi_nxv2i32_nxv2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v9, v8
; ZVFH-NEXT:    vmv1r.v v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptosi_nxv2i32_nxv2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}
declare <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfptosi_nxv2i64_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptosi_nxv2i64_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}
define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i64_nxv2f16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}
declare <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i8_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v9, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}
define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i8_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v9, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}
declare <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i16_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}
define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i16_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}
declare <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i32_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}
define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i32_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}
declare <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i64_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}
define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i64_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}
declare <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i8_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}
define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i8_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}
declare <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i16_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}
define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i16_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}
declare <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i32_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8, v0.t
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}
define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i32_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}
declare <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i64_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}
define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv2i64_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}
declare <vscale x 32 x i16> @llvm.vp.fptosi.nxv32i16.nxv32f32(<vscale x 32 x float>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i16> @vfptosi_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv32i16_nxv32f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 2
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v28, v16, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB34_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB34_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v24, v8, v0.t
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.fptosi.nxv32i16.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}
declare <vscale x 32 x i32> @llvm.vp.fptosi.nxv32i32.nxv32f32(<vscale x 32 x float>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i32> @vfptosi_nxv32i32_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv32i32_nxv32f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 2
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB35_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB35_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i32> @llvm.vp.fptosi.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i32> %v
}
define <vscale x 32 x i32> @vfptosi_nxv32i32_nxv32f32_unmasked(<vscale x 32 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv32i32_nxv32f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16
; CHECK-NEXT:    bltu a0, a1, .LBB36_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB36_2:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i32> @llvm.vp.fptosi.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i32> %v
}