; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
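; This file exercises lowering of the llvm.vp.uitofp intrinsic (unsigned
; integer to floating-point conversion under a mask and an explicit vector
; length) from i7/i8/i16/i32/i64 element types to scalable f16, f32, and f64
; vectors, in both masked and unmasked (all-true mask) forms, with Zvfh and
; Zvfhmin codegen checked separately where they differ.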
declare <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i7(<vscale x 2 x i7>, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i7(<vscale x 2 x i7> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i7:
; ZVFH: # %bb.0:
; ZVFH-NEXT: li a1, 127
; ZVFH-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; ZVFH-NEXT: vand.vx v9, v8, a1
; ZVFH-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; ZVFH-NEXT: vfwcvt.f.xu.v v8, v9, v0.t
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i7:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: li a1, 127
; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT: vand.vx v8, v8, a1
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vzext.vf2 v9, v8, v0.t
; ZVFHMIN-NEXT: vfwcvt.f.xu.v v10, v9, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i7(<vscale x 2 x i7> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %v
}
declare <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i8:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; ZVFH-NEXT: vfwcvt.f.xu.v v9, v8, v0.t
; ZVFH-NEXT: vmv1r.v v8, v9
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i8:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vzext.vf2 v9, v8, v0.t
; ZVFHMIN-NEXT: vfwcvt.f.xu.v v10, v9, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %v
}

define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i8_unmasked:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; ZVFH-NEXT: vfwcvt.f.xu.v v9, v8
; ZVFH-NEXT: vmv1r.v v8, v9
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i8_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vzext.vf2 v9, v8
; ZVFHMIN-NEXT: vfwcvt.f.xu.v v10, v9
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x half> %v
}
declare <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.xu.v v9, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %v
}

define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i16_unmasked:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfcvt.f.xu.v v8, v8
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.xu.v v9, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x half> %v
}
declare <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i32:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfncvt.f.xu.w v9, v8, v0.t
; ZVFH-NEXT: vmv1r.v v8, v9
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i32:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.f.xu.v v9, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %v
}

define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i32_unmasked:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfncvt.f.xu.w v9, v8
; ZVFH-NEXT: vmv1r.v v8, v9
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i32_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.f.xu.v v9, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x half> %v
}
declare <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i64:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFH-NEXT: vfncvt.f.xu.w v10, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFH-NEXT: vfncvt.f.f.w v8, v10, v0.t
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i64:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.xu.w v10, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %v
}

define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i64_unmasked:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFH-NEXT: vfncvt.f.xu.w v10, v8
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFH-NEXT: vfncvt.f.f.w v8, v10
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vuitofp_nxv2f16_nxv2i64_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.xu.w v10, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
  %v = call <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x half> %v
}
declare <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f32_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vzext.vf2 v9, v8, v0.t
; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f32_nxv2i8_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vzext.vf2 v9, v8
; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x float> %v
}
declare <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f32_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfwcvt.f.xu.v v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f32_nxv2i16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x float> %v
}
declare <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f32_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f32_nxv2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8
; CHECK-NEXT: ret
  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x float> %v
}
declare <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f32_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfncvt.f.xu.w v10, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @vuitofp_nxv2f32_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f32_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfncvt.f.xu.w v10, v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
  %v = call <vscale x 2 x float> @llvm.vp.uitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x float> %v
}
declare <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)

define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f64_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vzext.vf4 v10, v8, v0.t
; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %v
}

define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f64_nxv2i8_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vzext.vf4 v10, v8
; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
; CHECK-NEXT: ret
  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x double> %v
}
declare <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f64_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vzext.vf2 v10, v8, v0.t
; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %v
}

define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f64_nxv2i16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vzext.vf2 v10, v8
; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
; CHECK-NEXT: ret
  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x double> %v
}
declare <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f64_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfwcvt.f.xu.v v10, v8, v0.t
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %v
}

define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f64_nxv2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfwcvt.f.xu.v v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x double> %v
}
declare <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f64_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %v
}

define <vscale x 2 x double> @vuitofp_nxv2f64_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv2f64_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8
; CHECK-NEXT: ret
  %v = call <vscale x 2 x double> @llvm.vp.uitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x double> %v
}
declare <vscale x 32 x half> @llvm.vp.uitofp.nxv32f16.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i1>, i32)

define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv32f16_nxv32i32:
; ZVFH: # %bb.0:
; ZVFH-NEXT: addi sp, sp, -16
; ZVFH-NEXT: .cfi_def_cfa_offset 16
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: slli a1, a1, 3
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFH-NEXT: vmv1r.v v7, v0
; ZVFH-NEXT: addi a1, sp, 16
; ZVFH-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: srli a2, a1, 2
; ZVFH-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; ZVFH-NEXT: vslidedown.vx v0, v0, a2
; ZVFH-NEXT: slli a1, a1, 1
; ZVFH-NEXT: sub a2, a0, a1
; ZVFH-NEXT: sltu a3, a0, a2
; ZVFH-NEXT: addi a3, a3, -1
; ZVFH-NEXT: and a2, a3, a2
; ZVFH-NEXT: addi a3, sp, 16
; ZVFH-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; ZVFH-NEXT: vfncvt.f.xu.w v20, v24, v0.t
; ZVFH-NEXT: bltu a0, a1, .LBB25_2
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: mv a0, a1
; ZVFH-NEXT: .LBB25_2:
; ZVFH-NEXT: vmv1r.v v0, v7
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfncvt.f.xu.w v16, v8, v0.t
; ZVFH-NEXT: vmv8r.v v8, v16
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 3
; ZVFH-NEXT: add sp, sp, a0
; ZVFH-NEXT: addi sp, sp, 16
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vuitofp_nxv32f16_nxv32i32:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: srli a2, a1, 2
; ZVFHMIN-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: slli a1, a1, 1
; ZVFHMIN-NEXT: sub a2, a0, a1
; ZVFHMIN-NEXT: sltu a3, a0, a2
; ZVFHMIN-NEXT: addi a3, a3, -1
; ZVFHMIN-NEXT: and a2, a3, a2
; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.f.xu.v v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v24
; ZVFHMIN-NEXT: bltu a0, a1, .LBB25_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB25_2:
; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
; ZVFHMIN-NEXT: vmv8r.v v8, v16
; ZVFHMIN-NEXT: ret
  %v = call <vscale x 32 x half> @llvm.vp.uitofp.nxv32f16.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x half> %v
}
declare <vscale x 32 x float> @llvm.vp.uitofp.nxv32f32.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i1>, i32)

define <vscale x 32 x float> @vuitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv32f32_nxv32i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a2
; CHECK-NEXT: slli a1, a1, 1
; CHECK-NEXT: sub a2, a0, a1
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v16, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB26_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 32 x float> @llvm.vp.uitofp.nxv32f32.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x float> %v
}

define <vscale x 32 x float> @vuitofp_nxv32f32_nxv32i32_unmasked(<vscale x 32 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv32f32_nxv32i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
; CHECK-NEXT: sub a2, a0, a1
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v16, v16
; CHECK-NEXT: bltu a0, a1, .LBB27_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB27_2:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8
; CHECK-NEXT: ret
  %v = call <vscale x 32 x float> @llvm.vp.uitofp.nxv32f32.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x float> %v
}