; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
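
; Strict-FP (strictfp) integer-to-float conversions through
; @llvm.experimental.constrained.sitofp/uitofp on scalable vectors, covering
; i1/i7/i8/i16/i32 sources and f16/f32/f64 results across LMULs. i1 sources
; are first materialized as 0/-1 (signed) or 0/1 (unsigned) with
; vmv.v.i + vmerge.vim under the mask, then converted with a widening
; vfwcvt.f.x(u).v.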
declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
define <vscale x 1 x half> @vsitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i1_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
define <vscale x 1 x half> @vuitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i1_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}
declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
define <vscale x 1 x float> @vsitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i1_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
define <vscale x 1 x float> @vuitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i1_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}
declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
define <vscale x 1 x double> @vsitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i1_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
define <vscale x 1 x double> @vuitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i1_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}
declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
define <vscale x 2 x half> @vsitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i1_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}

declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
define <vscale x 2 x half> @vuitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i1_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}
declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
define <vscale x 2 x float> @vsitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i1_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}

declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
define <vscale x 2 x float> @vuitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i1_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}
declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
define <vscale x 2 x double> @vsitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i1_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}

declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
define <vscale x 2 x double> @vuitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i1_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}
declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
define <vscale x 4 x half> @vsitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i1_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}

declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
define <vscale x 4 x half> @vuitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i1_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}
declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
define <vscale x 4 x float> @vsitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i1_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}

declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
define <vscale x 4 x float> @vuitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i1_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}
declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
define <vscale x 4 x double> @vsitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i1_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}

declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
define <vscale x 4 x double> @vuitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i1_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}
declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
define <vscale x 8 x half> @vsitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i1_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}

declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
define <vscale x 8 x half> @vuitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i1_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}
declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
define <vscale x 8 x float> @vsitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i1_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}

declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
define <vscale x 8 x float> @vuitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i1_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}
declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
define <vscale x 8 x double> @vsitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i1_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}

declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
define <vscale x 8 x double> @vuitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i1_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}
declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
define <vscale x 16 x half> @vsitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv16i1_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i1(<vscale x 16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %evec
}

declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
define <vscale x 16 x half> @vuitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv16i1_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i1(<vscale x 16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %evec
}
declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
define <vscale x 16 x float> @vsitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv16i1_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i1(<vscale x 16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %evec
}

declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
define <vscale x 16 x float> @vuitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv16i1_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i1(<vscale x 16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %evec
}
declare <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i1(<vscale x 32 x i1>, metadata, metadata)
define <vscale x 32 x half> @vsitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv32i1_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i1(<vscale x 32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %evec
}

declare <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i1(<vscale x 32 x i1>, metadata, metadata)
define <vscale x 32 x half> @vuitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv32i1_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i1(<vscale x 32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %evec
}
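
; i8 sources: conversion to f16 is a single widening convert (vfwcvt.f.x(u).v,
; which widens SEW to 2*SEW); for f32/f64 results the input is first
; sign/zero-extended with vsext/vzext so the final step is still one widening
; convert.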
declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
define <vscale x 1 x half> @vsitofp_nxv1i8_nxv1f16(<vscale x 1 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i8_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}
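
; i7 is not a legal element type, so it is promoted to an i8 container first:
; the signed case sign-extends bit 6 with a shift pair (vadd.vv v8, v8, v8
; doubles the value, i.e. a left shift by one, then vsra.vi shifts it back
; arithmetically), and the unsigned case masks with 127.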
declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i7(<vscale x 1 x i7>, metadata, metadata)
define <vscale x 1 x half> @vsitofp_nxv1i7_nxv1f16(<vscale x 1 x i7> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i7_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vsra.vi v9, v8, 1
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i7(<vscale x 1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i7(<vscale x 1 x i7>, metadata, metadata)
define <vscale x 1 x half> @vuitofp_nxv1i7_nxv1f16(<vscale x 1 x i7> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i7_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 127
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i7(<vscale x 1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}
declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
define <vscale x 1 x half> @vuitofp_nxv1i8_nxv1f16(<vscale x 1 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i8_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}
declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
define <vscale x 1 x float> @vsitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i8_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
define <vscale x 1 x float> @vuitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i8_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}
declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
define <vscale x 1 x double> @vsitofp_nxv1i8_nxv1f64(<vscale x 1 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i8_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
define <vscale x 1 x double> @vuitofp_nxv1i8_nxv1f64(<vscale x 1 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i8_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}
declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
define <vscale x 2 x half> @vsitofp_nxv2i8_nxv2f16(<vscale x 2 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i8_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}

declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
define <vscale x 2 x half> @vuitofp_nxv2i8_nxv2f16(<vscale x 2 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i8_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}
declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
define <vscale x 2 x float> @vsitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i8_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}

declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
define <vscale x 2 x float> @vuitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i8_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}
declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
define <vscale x 2 x double> @vsitofp_nxv2i8_nxv2f64(<vscale x 2 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i8_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vsext.vf4 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}

declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
define <vscale x 2 x double> @vuitofp_nxv2i8_nxv2f64(<vscale x 2 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i8_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vzext.vf4 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}
declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
define <vscale x 4 x half> @vsitofp_nxv4i8_nxv4f16(<vscale x 4 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i8_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}

declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
define <vscale x 4 x half> @vuitofp_nxv4i8_nxv4f16(<vscale x 4 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i8_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}
declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
define <vscale x 4 x float> @vsitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i8_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vsext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}

declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
define <vscale x 4 x float> @vuitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i8_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}
declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
define <vscale x 4 x double> @vsitofp_nxv4i8_nxv4f64(<vscale x 4 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i8_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsext.vf4 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}

declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
define <vscale x 4 x double> @vuitofp_nxv4i8_nxv4f64(<vscale x 4 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i8_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vzext.vf4 v12, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}
declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
define <vscale x 8 x half> @vsitofp_nxv8i8_nxv8f16(<vscale x 8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i8_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}

declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
define <vscale x 8 x half> @vuitofp_nxv8i8_nxv8f16(<vscale x 8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i8_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}
declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
define <vscale x 8 x float> @vsitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i8_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}

declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
define <vscale x 8 x float> @vuitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i8_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}
declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
define <vscale x 8 x double> @vsitofp_nxv8i8_nxv8f64(<vscale x 8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i8_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vsext.vf4 v16, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}

declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
define <vscale x 8 x double> @vuitofp_nxv8i8_nxv8f64(<vscale x 8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i8_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vzext.vf4 v16, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}
declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
define <vscale x 16 x half> @vsitofp_nxv16i8_nxv16f16(<vscale x 16 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv16i8_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i8(<vscale x 16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %evec
}

declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
define <vscale x 16 x half> @vuitofp_nxv16i8_nxv16f16(<vscale x 16 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv16i8_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i8(<vscale x 16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %evec
}
declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
define <vscale x 16 x float> @vsitofp_nxv16i8_nxv16f32(<vscale x 16 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv16i8_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vsext.vf2 v16, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i8(<vscale x 16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %evec
}

declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
define <vscale x 16 x float> @vuitofp_nxv16i8_nxv16f32(<vscale x 16 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv16i8_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v16, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i8(<vscale x 16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %evec
}
declare <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i8(<vscale x 32 x i8>, metadata, metadata)
define <vscale x 32 x half> @vsitofp_nxv32i8_nxv32f16(<vscale x 32 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv32i8_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i8(<vscale x 32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %evec
}

declare <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i8(<vscale x 32 x i8>, metadata, metadata)
define <vscale x 32 x half> @vuitofp_nxv32i8_nxv32f16(<vscale x 32 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv32i8_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i8(<vscale x 32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %evec
}
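
; i16 sources: f16 results use a same-width vfcvt.f.x(u).v, f32 results a
; single widening convert, and f64 results sign/zero-extend with
; vsext/vzext.vf2 before the widening convert.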
declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
define <vscale x 1 x half> @vsitofp_nxv1i16_nxv1f16(<vscale x 1 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i16_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
define <vscale x 1 x half> @vuitofp_nxv1i16_nxv1f16(<vscale x 1 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i16_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}
declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
define <vscale x 1 x float> @vsitofp_nxv1i16_nxv1f32(<vscale x 1 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i16_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
define <vscale x 1 x float> @vuitofp_nxv1i16_nxv1f32(<vscale x 1 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i16_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}
declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
define <vscale x 1 x double> @vsitofp_nxv1i16_nxv1f64(<vscale x 1 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i16_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
define <vscale x 1 x double> @vuitofp_nxv1i16_nxv1f64(<vscale x 1 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i16_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}
declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
define <vscale x 2 x half> @vsitofp_nxv2i16_nxv2f16(<vscale x 2 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i16_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}

declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
define <vscale x 2 x half> @vuitofp_nxv2i16_nxv2f16(<vscale x 2 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i16_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}
declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
define <vscale x 2 x float> @vsitofp_nxv2i16_nxv2f32(<vscale x 2 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i16_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}

declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
define <vscale x 2 x float> @vuitofp_nxv2i16_nxv2f32(<vscale x 2 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i16_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}
declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
define <vscale x 2 x double> @vsitofp_nxv2i16_nxv2f64(<vscale x 2 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i16_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vsext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}

declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
define <vscale x 2 x double> @vuitofp_nxv2i16_nxv2f64(<vscale x 2 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i16_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}
declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
define <vscale x 4 x half> @vsitofp_nxv4i16_nxv4f16(<vscale x 4 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i16_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}

declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
define <vscale x 4 x half> @vuitofp_nxv4i16_nxv4f16(<vscale x 4 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i16_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}
declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
define <vscale x 4 x float> @vsitofp_nxv4i16_nxv4f32(<vscale x 4 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i16_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}

declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
define <vscale x 4 x float> @vuitofp_nxv4i16_nxv4f32(<vscale x 4 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i16_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}
declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
define <vscale x 4 x double> @vsitofp_nxv4i16_nxv4f64(<vscale x 4 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i16_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}

declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
define <vscale x 4 x double> @vuitofp_nxv4i16_nxv4f64(<vscale x 4 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i16_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}
declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
define <vscale x 8 x half> @vsitofp_nxv8i16_nxv8f16(<vscale x 8 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i16_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}

declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
define <vscale x 8 x half> @vuitofp_nxv8i16_nxv8f16(<vscale x 8 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i16_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}
declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
define <vscale x 8 x float> @vsitofp_nxv8i16_nxv8f32(<vscale x 8 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i16_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}

declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
define <vscale x 8 x float> @vuitofp_nxv8i16_nxv8f32(<vscale x 8 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i16_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}
declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
define <vscale x 8 x double> @vsitofp_nxv8i16_nxv8f64(<vscale x 8 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i16_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vsext.vf2 v16, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}

declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
define <vscale x 8 x double> @vuitofp_nxv8i16_nxv8f64(<vscale x 8 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i16_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v16, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}
declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
define <vscale x 16 x half> @vsitofp_nxv16i16_nxv16f16(<vscale x 16 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv16i16_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i16(<vscale x 16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %evec
}

declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
define <vscale x 16 x half> @vuitofp_nxv16i16_nxv16f16(<vscale x 16 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv16i16_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i16(<vscale x 16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %evec
}
declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
define <vscale x 16 x float> @vsitofp_nxv16i16_nxv16f32(<vscale x 16 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv16i16_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i16(<vscale x 16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %evec
}

declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
define <vscale x 16 x float> @vuitofp_nxv16i16_nxv16f32(<vscale x 16 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv16i16_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i16(<vscale x 16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %evec
}
declare <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i16(<vscale x 32 x i16>, metadata, metadata)
define <vscale x 32 x half> @vsitofp_nxv32i16_nxv32f16(<vscale x 32 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv32i16_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i16(<vscale x 32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %evec
}

declare <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i16(<vscale x 32 x i16>, metadata, metadata)
define <vscale x 32 x half> @vuitofp_nxv32i16_nxv32f16(<vscale x 32 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv32i16_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i16(<vscale x 32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %evec
}
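
; i32 sources: f16 results need a narrowing convert (vfncvt.f.x(u).w), f32
; results are a same-width vfcvt, and f64 results a widening vfwcvt.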
declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
define <vscale x 1 x half> @vsitofp_nxv1i32_nxv1f16(<vscale x 1 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i32_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
define <vscale x 1 x half> @vuitofp_nxv1i32_nxv1f16(<vscale x 1 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i32_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
define <vscale x 1 x float> @vsitofp_nxv1i32_nxv1f32(<vscale x 1 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i32_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
define <vscale x 1 x float> @vuitofp_nxv1i32_nxv1f32(<vscale x 1 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i32_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
define <vscale x 1 x double> @vsitofp_nxv1i32_nxv1f64(<vscale x 1 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i32_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
define <vscale x 1 x double> @vuitofp_nxv1i32_nxv1f64(<vscale x 1 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i32_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
define <vscale x 2 x half> @vsitofp_nxv2i32_nxv2f16(<vscale x 2 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i32_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}

declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
define <vscale x 2 x half> @vuitofp_nxv2i32_nxv2f16(<vscale x 2 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i32_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}

declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
define <vscale x 2 x float> @vsitofp_nxv2i32_nxv2f32(<vscale x 2 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i32_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}

declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
define <vscale x 2 x float> @vuitofp_nxv2i32_nxv2f32(<vscale x 2 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i32_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}

declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
define <vscale x 2 x double> @vsitofp_nxv2i32_nxv2f64(<vscale x 2 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i32_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}

declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
define <vscale x 2 x double> @vuitofp_nxv2i32_nxv2f64(<vscale x 2 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i32_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}

declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
define <vscale x 4 x half> @vsitofp_nxv4i32_nxv4f16(<vscale x 4 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i32_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}

declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
define <vscale x 4 x half> @vuitofp_nxv4i32_nxv4f16(<vscale x 4 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i32_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}

declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
define <vscale x 4 x float> @vsitofp_nxv4i32_nxv4f32(<vscale x 4 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i32_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}

declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
define <vscale x 4 x float> @vuitofp_nxv4i32_nxv4f32(<vscale x 4 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i32_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}

declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
define <vscale x 4 x double> @vsitofp_nxv4i32_nxv4f64(<vscale x 4 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i32_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}

declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
define <vscale x 4 x double> @vuitofp_nxv4i32_nxv4f64(<vscale x 4 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i32_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}

declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
define <vscale x 8 x half> @vsitofp_nxv8i32_nxv8f16(<vscale x 8 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i32_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}

declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
define <vscale x 8 x half> @vuitofp_nxv8i32_nxv8f16(<vscale x 8 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i32_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}

declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
define <vscale x 8 x float> @vsitofp_nxv8i32_nxv8f32(<vscale x 8 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i32_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}

declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
define <vscale x 8 x float> @vuitofp_nxv8i32_nxv8f32(<vscale x 8 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i32_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}

declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
define <vscale x 8 x double> @vsitofp_nxv8i32_nxv8f64(<vscale x 8 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i32_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}

declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
define <vscale x 8 x double> @vuitofp_nxv8i32_nxv8f64(<vscale x 8 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i32_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}

declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
define <vscale x 16 x half> @vsitofp_nxv16i32_nxv16f16(<vscale x 16 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv16i32_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v16, v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i32(<vscale x 16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %evec
}

declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
define <vscale x 16 x half> @vuitofp_nxv16i32_nxv16f16(<vscale x 16 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv16i32_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v16, v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i32(<vscale x 16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %evec
}

declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
define <vscale x 16 x float> @vsitofp_nxv16i32_nxv16f32(<vscale x 16 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv16i32_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i32(<vscale x 16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %evec
}

declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
define <vscale x 16 x float> @vuitofp_nxv16i32_nxv16f32(<vscale x 16 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv16i32_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i32(<vscale x 16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %evec
}
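
; i64-source tests: narrowing converts only halve the element width, so there
; is no single i64 -> f16 instruction; the code narrows twice, first to f32
; with vfncvt.f.x(u).w and then to f16 with vfncvt.f.f.w under a second
; vsetvli.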

declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
define <vscale x 1 x half> @vsitofp_nxv1i64_nxv1f16(<vscale x 1 x i64> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i64_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
define <vscale x 1 x half> @vuitofp_nxv1i64_nxv1f16(<vscale x 1 x i64> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i64_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %evec
}

declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
define <vscale x 1 x float> @vsitofp_nxv1i64_nxv1f32(<vscale x 1 x i64> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i64_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
define <vscale x 1 x float> @vuitofp_nxv1i64_nxv1f32(<vscale x 1 x i64> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i64_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %evec
}

declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
define <vscale x 1 x double> @vsitofp_nxv1i64_nxv1f64(<vscale x 1 x i64> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv1i64_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
define <vscale x 1 x double> @vuitofp_nxv1i64_nxv1f64(<vscale x 1 x i64> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv1i64_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %evec
}

declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
define <vscale x 2 x half> @vsitofp_nxv2i64_nxv2f16(<vscale x 2 x i64> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i64_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}

declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
define <vscale x 2 x half> @vuitofp_nxv2i64_nxv2f16(<vscale x 2 x i64> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i64_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %evec
}

declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
define <vscale x 2 x float> @vsitofp_nxv2i64_nxv2f32(<vscale x 2 x i64> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i64_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}

declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
define <vscale x 2 x float> @vuitofp_nxv2i64_nxv2f32(<vscale x 2 x i64> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i64_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %evec
}

declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
define <vscale x 2 x double> @vsitofp_nxv2i64_nxv2f64(<vscale x 2 x i64> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv2i64_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}

declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
define <vscale x 2 x double> @vuitofp_nxv2i64_nxv2f64(<vscale x 2 x i64> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv2i64_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %evec
}

declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
define <vscale x 4 x half> @vsitofp_nxv4i64_nxv4f16(<vscale x 4 x i64> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i64_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}

declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
define <vscale x 4 x half> @vuitofp_nxv4i64_nxv4f16(<vscale x 4 x i64> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i64_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %evec
}

declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
define <vscale x 4 x float> @vsitofp_nxv4i64_nxv4f32(<vscale x 4 x i64> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i64_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}

declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
define <vscale x 4 x float> @vuitofp_nxv4i64_nxv4f32(<vscale x 4 x i64> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i64_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %evec
}

declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
define <vscale x 4 x double> @vsitofp_nxv4i64_nxv4f64(<vscale x 4 x i64> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv4i64_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}

declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
define <vscale x 4 x double> @vuitofp_nxv4i64_nxv4f64(<vscale x 4 x i64> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv4i64_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %evec
}

declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
define <vscale x 8 x half> @vsitofp_nxv8i64_nxv8f16(<vscale x 8 x i64> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i64_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}

declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
define <vscale x 8 x half> @vuitofp_nxv8i64_nxv8f16(<vscale x 8 x i64> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i64_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %evec
}

declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
define <vscale x 8 x float> @vsitofp_nxv8i64_nxv8f32(<vscale x 8 x i64> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i64_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v16, v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}

declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
define <vscale x 8 x float> @vuitofp_nxv8i64_nxv8f32(<vscale x 8 x i64> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i64_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v16, v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %evec
}

declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
define <vscale x 8 x double> @vsitofp_nxv8i64_nxv8f64(<vscale x 8 x i64> %va) strictfp {
; CHECK-LABEL: vsitofp_nxv8i64_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec
}

declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
define <vscale x 8 x double> @vuitofp_nxv8i64_nxv8f64(<vscale x 8 x i64> %va) strictfp {
; CHECK-LABEL: vuitofp_nxv8i64_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %evec