; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
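
; This file exercises VP (vector-predicated) float-to-unsigned conversion,
; @llvm.vp.fptoui.*, for scalable vectors on RVV. The same IR is compiled four
; ways: rv32/rv64 with Zvfh (native f16 arithmetic) and with Zvfhmin, where
; f16 has no native arithmetic, so every f16 source is first widened to f32
; with vfwcvt.f.f.v before the integer conversion.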

declare <vscale x 2 x i7> @llvm.vp.fptoui.nxv2i7.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i7> @vfptoui_v4i7_v4f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfptoui_v4i7_v4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
; ZVFH-NEXT:    vmv1r.v v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptoui_v4i7_v4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i7> @llvm.vp.fptoui.nxv2i7.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i7> %v
}
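
; i7 is not a legal element type, so the result above is produced at e8. Note
; that both prefixes use the *signed* narrowing convert (vfncvt.rtz.x.f.w)
; even though this is fptoui, presumably because only the low 7 bits of the
; result are demanded, making the signed and unsigned converts equivalent.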

declare <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfptoui_nxv2i8_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8, v0.t
; ZVFH-NEXT:    vmv1r.v v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptoui_nxv2i8_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfptoui_nxv2i8_nxv2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; ZVFH-NEXT:    vmv1r.v v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptoui_nxv2i8_nxv2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}
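
; Under ZVFHMIN, f16->i8 takes three steps: widen f16 to f32 (vfwcvt.f.f.v at
; VLMAX), narrowing-convert f32 to i16 (vfncvt.rtz.xu.f.w), then truncate i16
; to i8 with a narrowing shift by zero (vnsrl.wi ..., 0).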

declare <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfptoui_nxv2i16_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptoui_nxv2i16_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfptoui_nxv2i16_nxv2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptoui_nxv2i16_nxv2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfptoui_nxv2i32_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v9, v8, v0.t
; ZVFH-NEXT:    vmv1r.v v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptoui_nxv2i32_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfptoui_nxv2i32_nxv2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v9, v8
; ZVFH-NEXT:    vmv1r.v v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptoui_nxv2i32_nxv2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}
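
; The conversion shape tracks the width ratio: f16->i16 is a same-width
; vfcvt.rtz.xu.f.v, while f16->i32 is a single widening vfwcvt.rtz.xu.f.v.
; Under ZVFHMIN the operand has already been widened to f32, so the i32 case
; becomes a same-width vfcvt at e32, m1.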

declare <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfptoui_nxv2i64_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptoui_nxv2i64_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfptoui_nxv2i64_nxv2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v8, v10
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfptoui_nxv2i64_nxv2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.rtz.xu.f.v v8, v10
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}
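
; f16->i64 quadruples the element width, and each vfwcvt only doubles SEW, so
; it widens twice: f16->f32 (vfwcvt.f.f.v), then f32->i64 (vfwcvt.rtz.xu.f.v).
; With ZVFH the first step is itself masked under the EVL; ZVFHMIN performs
; the f16->f32 promotion unmasked at VLMAX.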

declare <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i8_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v9, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i8_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v9, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}
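
; Narrowing by a factor of four (f32->i8) likewise needs two steps, since
; vfncvt at most halves SEW: one narrowing convert (f32->i16) followed by a
; vnsrl.wi truncation (i16->i8) under a VL-preserving vsetvli zero, zero.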

declare <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i16_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i16_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i32_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i32_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

declare <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i64_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v10, v8, v0.t
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i64_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

declare <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i8_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i8_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}
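
; f64->i8 is the deepest narrowing in the file: one vfncvt (f64->i32) followed
; by two vnsrl.wi truncations (i32->i16->i8), each step halving SEW while
; vsetvli zero, zero keeps VL fixed and adjusts only SEW/LMUL.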

declare <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i16_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i16_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i32_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8, v0.t
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i32_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

declare <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i64_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv2i64_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}
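
; The nxv32f32 sources below would need LMUL 16, which does not exist, so the
; operation is split into two LMUL-8 halves and the mask is split with
; vslidedown.vx. The EVL for the high half is a saturating subtraction,
; visible in the generated code as:
;   slli a1, a1, 1      # a1 = VLMAX for e32/m8 (vlenb * 2)
;   sub  a2, a0, a1     # a2 = evl - VLMAX (may wrap)
;   sltu a3, a0, a2     # a3 = 1 iff the subtraction wrapped
;   addi a3, a3, -1     # a3 = 0 on wrap, all-ones otherwise
;   and  a2, a3, a2     # a2 = max(evl - VLMAX, 0)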

declare <vscale x 32 x i16> @llvm.vp.fptoui.nxv32i16.nxv32f32(<vscale x 32 x float>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i16> @vfptoui_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv32i16_nxv32f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vmv1r.v v7, v0
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 2
; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    addi a3, sp, 16
; CHECK-NEXT:    vl8r.v v24, (a3) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v20, v24, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB25_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB25_2:
; CHECK-NEXT:    vmv1r.v v0, v7
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v16, v8, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.fptoui.nxv32i16.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}
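
; In the f32->i16 split above, the nxv32f32 source occupies v8-v23 and v0 is
; needed for both mask halves, so the backend spills the high source group
; (v16-v23) to a scalable stack slot; the .cfi_escape encodes the resulting
; frame size, sp + 16 + 8 * vlenb, which is not a compile-time constant.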

declare <vscale x 32 x i32> @llvm.vp.fptoui.nxv32i32.nxv32f32(<vscale x 32 x float>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i32> @vfptoui_nxv32i32_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv32i32_nxv32f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 2
; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v16, v16, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB26_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB26_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i32> @llvm.vp.fptoui.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i32> %v
}

define <vscale x 32 x i32> @vfptoui_nxv32i32_nxv32f32_unmasked(<vscale x 32 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv32i32_nxv32f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v16, v16
; CHECK-NEXT:    bltu a0, a1, .LBB27_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB27_2:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i32> @llvm.vp.fptoui.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i32> %v
}