; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; <1 x i1> -> f16/f32/f64 strictfp conversions.
; NOTE(review): restored `# %bb.0:`/`ret` check lines, IR `ret`s and closing
; braces dropped during extraction — regenerate with update_llc_test_checks.py
; to confirm.
declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i1(<1 x i1>, metadata, metadata)
define <1 x half> @vsitofp_v1i1_v1f16(<1 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i1_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i1(<1 x i1>, metadata, metadata)
define <1 x half> @vuitofp_v1i1_v1f16(<1 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i1_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i1(<1 x i1>, metadata, metadata)
define <1 x float> @vsitofp_v1i1_v1f32(<1 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i1_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i1(<1 x i1>, metadata, metadata)
define <1 x float> @vuitofp_v1i1_v1f32(<1 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i1_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i1(<1 x i1>, metadata, metadata)
define <1 x double> @vsitofp_v1i1_v1f64(<1 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i1_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}

declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i1(<1 x i1>, metadata, metadata)
define <1 x double> @vuitofp_v1i1_v1f64(<1 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i1_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}
; <2 x i1> -> f16/f32/f64 strictfp conversions.
; NOTE(review): restored check/ret lines and braces dropped during extraction.
declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i1(<2 x i1>, metadata, metadata)
define <2 x half> @vsitofp_v2i1_v2f16(<2 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i1_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i1(<2 x i1>, metadata, metadata)
define <2 x half> @vuitofp_v2i1_v2f16(<2 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i1_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i1(<2 x i1>, metadata, metadata)
define <2 x float> @vsitofp_v2i1_v2f32(<2 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i1_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i1(<2 x i1>, metadata, metadata)
define <2 x float> @vuitofp_v2i1_v2f32(<2 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i1_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i1(<2 x i1>, metadata, metadata)
define <2 x double> @vsitofp_v2i1_v2f64(<2 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i1_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}

declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i1(<2 x i1>, metadata, metadata)
define <2 x double> @vuitofp_v2i1_v2f64(<2 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i1_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}
; <4 x i1> -> f16/f32/f64 strictfp conversions.
; NOTE(review): restored check/ret lines and braces dropped during extraction.
declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i1(<4 x i1>, metadata, metadata)
define <4 x half> @vsitofp_v4i1_v4f16(<4 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i1_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i1(<4 x i1>, metadata, metadata)
define <4 x half> @vuitofp_v4i1_v4f16(<4 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i1_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
define <4 x float> @vsitofp_v4i1_v4f32(<4 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i1_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
define <4 x float> @vuitofp_v4i1_v4f32(<4 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i1_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i1(<4 x i1>, metadata, metadata)
define <4 x double> @vsitofp_v4i1_v4f64(<4 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i1_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}

declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i1(<4 x i1>, metadata, metadata)
define <4 x double> @vuitofp_v4i1_v4f64(<4 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i1_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}
; <8 x i1> -> f16/f32/f64 strictfp conversions.
; NOTE(review): restored check/ret lines and braces dropped during extraction.
declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i1(<8 x i1>, metadata, metadata)
define <8 x half> @vsitofp_v8i1_v8f16(<8 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i1_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i1(<8 x i1>, metadata, metadata)
define <8 x half> @vuitofp_v8i1_v8f16(<8 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i1_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i1(<8 x i1>, metadata, metadata)
define <8 x float> @vsitofp_v8i1_v8f32(<8 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i1_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i1(<8 x i1>, metadata, metadata)
define <8 x float> @vuitofp_v8i1_v8f32(<8 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i1_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i1(<8 x i1>, metadata, metadata)
define <8 x double> @vsitofp_v8i1_v8f64(<8 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i1_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}

declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i1(<8 x i1>, metadata, metadata)
define <8 x double> @vuitofp_v8i1_v8f64(<8 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i1_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}
; <16 x i1> -> f16/f32 strictfp conversions.
; NOTE(review): restored check/ret lines and braces dropped during extraction.
declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i1(<16 x i1>, metadata, metadata)
define <16 x half> @vsitofp_v16i1_v16f16(<16 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i1_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1>, metadata, metadata)
define <16 x half> @vuitofp_v16i1_v16f16(<16 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i1_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i1(<16 x i1>, metadata, metadata)
define <16 x float> @vsitofp_v16i1_v16f32(<16 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i1_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}

declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i1(<16 x i1>, metadata, metadata)
define <16 x float> @vuitofp_v16i1_v16f32(<16 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i1_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}
; <32 x i1> -> f16 strictfp conversions (VL 32 needs a scalar register).
; NOTE(review): restored check/ret lines and braces dropped during extraction.
declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i1(<32 x i1>, metadata, metadata)
define <32 x half> @vsitofp_v32i1_v32f16(<32 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v32i1_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i1(<32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <32 x half> %evec
}

declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i1(<32 x i1>, metadata, metadata)
define <32 x half> @vuitofp_v32i1_v32f16(<32 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v32i1_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i1(<32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <32 x half> %evec
}
; <1 x i8>/<1 x i7> -> f16 strictfp conversions.
; NOTE(review): restored check/ret lines and braces dropped during extraction.
; vsitofp_v1i7_v1f16 carries no CHECK lines in the original (riscv32/riscv64
; outputs differ, so update_llc_test_checks.py omits them) — left as-is.
declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i8(<1 x i8>, metadata, metadata)
define <1 x half> @vsitofp_v1i8_v1f16(<1 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i8_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i7(<1 x i7>, metadata, metadata)
define <1 x half> @vsitofp_v1i7_v1f16(<1 x i7> %va) strictfp {
  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i7(<1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i7(<1 x i7>, metadata, metadata)
define <1 x half> @vuitofp_v1i7_v1f16(<1 x i7> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i7_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    andi a0, a0, 127
; CHECK-NEXT:    fcvt.h.wu fa5, a0
; CHECK-NEXT:    fsh fa5, 14(sp)
; CHECK-NEXT:    addi a0, sp, 14
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i7(<1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i8(<1 x i8>, metadata, metadata)
define <1 x half> @vuitofp_v1i8_v1f16(<1 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i8_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}
; <1 x i8> -> f32/f64 strictfp conversions (extend first, then widen-convert).
; NOTE(review): restored check/ret lines and braces dropped during extraction.
declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i8(<1 x i8>, metadata, metadata)
define <1 x float> @vsitofp_v1i8_v1f32(<1 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i8_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i8(<1 x i8>, metadata, metadata)
define <1 x float> @vuitofp_v1i8_v1f32(<1 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i8_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i8(<1 x i8>, metadata, metadata)
define <1 x double> @vsitofp_v1i8_v1f64(<1 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i8_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}

declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i8(<1 x i8>, metadata, metadata)
define <1 x double> @vuitofp_v1i8_v1f64(<1 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i8_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}
; <2 x i8> -> f16/f32/f64 strictfp conversions.
; NOTE(review): restored check/ret lines and braces dropped during extraction.
declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i8(<2 x i8>, metadata, metadata)
define <2 x half> @vsitofp_v2i8_v2f16(<2 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i8_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i8(<2 x i8>, metadata, metadata)
define <2 x half> @vuitofp_v2i8_v2f16(<2 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i8_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i8(<2 x i8>, metadata, metadata)
define <2 x float> @vsitofp_v2i8_v2f32(<2 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i8_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i8(<2 x i8>, metadata, metadata)
define <2 x float> @vuitofp_v2i8_v2f32(<2 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i8_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i8(<2 x i8>, metadata, metadata)
define <2 x double> @vsitofp_v2i8_v2f64(<2 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i8_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}

declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i8(<2 x i8>, metadata, metadata)
define <2 x double> @vuitofp_v2i8_v2f64(<2 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i8_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}
; <4 x i8> -> f16/f32/f64 strictfp conversions.
; NOTE(review): restored check/ret lines and braces dropped during extraction.
declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i8(<4 x i8>, metadata, metadata)
define <4 x half> @vsitofp_v4i8_v4f16(<4 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i8_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i8(<4 x i8>, metadata, metadata)
define <4 x half> @vuitofp_v4i8_v4f16(<4 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i8_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8>, metadata, metadata)
define <4 x float> @vsitofp_v4i8_v4f32(<4 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i8_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i8(<4 x i8>, metadata, metadata)
define <4 x float> @vuitofp_v4i8_v4f32(<4 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i8_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i8(<4 x i8>, metadata, metadata)
define <4 x double> @vsitofp_v4i8_v4f64(<4 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i8_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vsext.vf4 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}

declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i8(<4 x i8>, metadata, metadata)
define <4 x double> @vuitofp_v4i8_v4f64(<4 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i8_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vzext.vf4 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}
; <8 x i8> -> f16/f32/f64 strictfp conversions.
; NOTE(review): restored check/ret lines and braces dropped during extraction.
declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i8(<8 x i8>, metadata, metadata)
define <8 x half> @vsitofp_v8i8_v8f16(<8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i8_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i8(<8 x i8>, metadata, metadata)
define <8 x half> @vuitofp_v8i8_v8f16(<8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i8_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i8(<8 x i8>, metadata, metadata)
define <8 x float> @vsitofp_v8i8_v8f32(<8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i8_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vsext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i8(<8 x i8>, metadata, metadata)
define <8 x float> @vuitofp_v8i8_v8f32(<8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i8_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i8(<8 x i8>, metadata, metadata)
define <8 x double> @vsitofp_v8i8_v8f64(<8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i8_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vsext.vf4 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}

declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i8(<8 x i8>, metadata, metadata)
define <8 x double> @vuitofp_v8i8_v8f64(<8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i8_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vzext.vf4 v12, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}
; <16 x i8> -> f16/f32 strictfp conversions.
; NOTE(review): restored check/ret lines and braces dropped during extraction.
declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i8(<16 x i8>, metadata, metadata)
define <16 x half> @vsitofp_v16i8_v16f16(<16 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i8_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8>, metadata, metadata)
define <16 x half> @vuitofp_v16i8_v16f16(<16 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i8_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i8(<16 x i8>, metadata, metadata)
define <16 x float> @vsitofp_v16i8_v16f32(<16 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i8_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}

declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i8(<16 x i8>, metadata, metadata)
define <16 x float> @vuitofp_v16i8_v16f32(<16 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i8_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}
759 declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i8(<32 x i8>, metadata, metadata)
; strictfp sitofp <32 x i8> -> <32 x half>: VL=32 needs li/vsetvli (no vsetivli immediate); one widening vfwcvt.f.x.v at e8/m2.
760 define <32 x half> @vsitofp_v32i8_v32f16(<32 x i8> %va) strictfp {
761 ; CHECK-LABEL: vsitofp_v32i8_v32f16:
763 ; CHECK-NEXT: li a0, 32
764 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
765 ; CHECK-NEXT: vfwcvt.f.x.v v12, v8
766 ; CHECK-NEXT: vmv4r.v v8, v12
768 %evec = call <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i8(<32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
769 ret <32 x half> %evec
772 declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i8(<32 x i8>, metadata, metadata)
; strictfp uitofp <32 x i8> -> <32 x half>: VL=32 via li/vsetvli; one widening vfwcvt.f.xu.v at e8/m2.
773 define <32 x half> @vuitofp_v32i8_v32f16(<32 x i8> %va) strictfp {
774 ; CHECK-LABEL: vuitofp_v32i8_v32f16:
776 ; CHECK-NEXT: li a0, 32
777 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
778 ; CHECK-NEXT: vfwcvt.f.xu.v v12, v8
779 ; CHECK-NEXT: vmv4r.v v8, v12
781 %evec = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i8(<32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
782 ret <32 x half> %evec
785 declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i16(<1 x i16>, metadata, metadata)
; strictfp sitofp <1 x i16> -> <1 x half>: same element width, so a single in-place vfcvt.f.x.v suffices.
786 define <1 x half> @vsitofp_v1i16_v1f16(<1 x i16> %va) strictfp {
787 ; CHECK-LABEL: vsitofp_v1i16_v1f16:
789 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
790 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
792 %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
796 declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i16(<1 x i16>, metadata, metadata)
; strictfp uitofp <1 x i16> -> <1 x half>: same element width, single in-place vfcvt.f.xu.v.
797 define <1 x half> @vuitofp_v1i16_v1f16(<1 x i16> %va) strictfp {
798 ; CHECK-LABEL: vuitofp_v1i16_v1f16:
800 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
801 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
803 %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
807 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i16(<1 x i16>, metadata, metadata)
; strictfp sitofp <1 x i16> -> <1 x float>: one widening vfwcvt.f.x.v (e16->f32), result moved back to v8.
808 define <1 x float> @vsitofp_v1i16_v1f32(<1 x i16> %va) strictfp {
809 ; CHECK-LABEL: vsitofp_v1i16_v1f32:
811 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
812 ; CHECK-NEXT: vfwcvt.f.x.v v9, v8
813 ; CHECK-NEXT: vmv1r.v v8, v9
815 %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
816 ret <1 x float> %evec
819 declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i16(<1 x i16>, metadata, metadata)
; strictfp uitofp <1 x i16> -> <1 x float>: one widening vfwcvt.f.xu.v (e16->f32), result moved back to v8.
820 define <1 x float> @vuitofp_v1i16_v1f32(<1 x i16> %va) strictfp {
821 ; CHECK-LABEL: vuitofp_v1i16_v1f32:
823 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
824 ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
825 ; CHECK-NEXT: vmv1r.v v8, v9
827 %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
828 ret <1 x float> %evec
831 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i16(<1 x i16>, metadata, metadata)
; strictfp sitofp <1 x i16> -> <1 x double>: sign-extend e16->e32 (vsext.vf2) then widening convert to f64 (vfwcvt.f.x.v).
832 define <1 x double> @vsitofp_v1i16_v1f64(<1 x i16> %va) strictfp {
833 ; CHECK-LABEL: vsitofp_v1i16_v1f64:
835 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
836 ; CHECK-NEXT: vsext.vf2 v9, v8
837 ; CHECK-NEXT: vfwcvt.f.x.v v8, v9
839 %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
840 ret <1 x double> %evec
843 declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i16(<1 x i16>, metadata, metadata)
; strictfp uitofp <1 x i16> -> <1 x double>: zero-extend e16->e32 (vzext.vf2) then widening convert to f64 (vfwcvt.f.xu.v).
844 define <1 x double> @vuitofp_v1i16_v1f64(<1 x i16> %va) strictfp {
845 ; CHECK-LABEL: vuitofp_v1i16_v1f64:
847 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
848 ; CHECK-NEXT: vzext.vf2 v9, v8
849 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
851 %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
852 ret <1 x double> %evec
855 declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i16(<2 x i16>, metadata, metadata)
; strictfp sitofp <2 x i16> -> <2 x half>: same element width, single in-place vfcvt.f.x.v.
856 define <2 x half> @vsitofp_v2i16_v2f16(<2 x i16> %va) strictfp {
857 ; CHECK-LABEL: vsitofp_v2i16_v2f16:
859 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
860 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
862 %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
866 declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i16(<2 x i16>, metadata, metadata)
; strictfp uitofp <2 x i16> -> <2 x half>: same element width, single in-place vfcvt.f.xu.v.
867 define <2 x half> @vuitofp_v2i16_v2f16(<2 x i16> %va) strictfp {
868 ; CHECK-LABEL: vuitofp_v2i16_v2f16:
870 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
871 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
873 %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
877 declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i16(<2 x i16>, metadata, metadata)
; strictfp sitofp <2 x i16> -> <2 x float>: one widening vfwcvt.f.x.v, result moved back to v8.
878 define <2 x float> @vsitofp_v2i16_v2f32(<2 x i16> %va) strictfp {
879 ; CHECK-LABEL: vsitofp_v2i16_v2f32:
881 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
882 ; CHECK-NEXT: vfwcvt.f.x.v v9, v8
883 ; CHECK-NEXT: vmv1r.v v8, v9
885 %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
886 ret <2 x float> %evec
889 declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i16(<2 x i16>, metadata, metadata)
; strictfp uitofp <2 x i16> -> <2 x float>: one widening vfwcvt.f.xu.v, result moved back to v8.
890 define <2 x float> @vuitofp_v2i16_v2f32(<2 x i16> %va) strictfp {
891 ; CHECK-LABEL: vuitofp_v2i16_v2f32:
893 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
894 ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
895 ; CHECK-NEXT: vmv1r.v v8, v9
897 %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
898 ret <2 x float> %evec
901 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
; strictfp sitofp <2 x i16> -> <2 x double>: sign-extend e16->e32 (vsext.vf2) then widening convert to f64.
902 define <2 x double> @vsitofp_v2i16_v2f64(<2 x i16> %va) strictfp {
903 ; CHECK-LABEL: vsitofp_v2i16_v2f64:
905 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
906 ; CHECK-NEXT: vsext.vf2 v9, v8
907 ; CHECK-NEXT: vfwcvt.f.x.v v8, v9
909 %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
910 ret <2 x double> %evec
913 declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
; strictfp uitofp <2 x i16> -> <2 x double>: zero-extend e16->e32 (vzext.vf2) then widening convert to f64.
914 define <2 x double> @vuitofp_v2i16_v2f64(<2 x i16> %va) strictfp {
915 ; CHECK-LABEL: vuitofp_v2i16_v2f64:
917 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
918 ; CHECK-NEXT: vzext.vf2 v9, v8
919 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
921 %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
922 ret <2 x double> %evec
925 declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i16(<4 x i16>, metadata, metadata)
; strictfp sitofp <4 x i16> -> <4 x half>: same element width, single in-place vfcvt.f.x.v.
926 define <4 x half> @vsitofp_v4i16_v4f16(<4 x i16> %va) strictfp {
927 ; CHECK-LABEL: vsitofp_v4i16_v4f16:
929 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
930 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
932 %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
936 declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i16(<4 x i16>, metadata, metadata)
; strictfp uitofp <4 x i16> -> <4 x half>: same element width, single in-place vfcvt.f.xu.v.
937 define <4 x half> @vuitofp_v4i16_v4f16(<4 x i16> %va) strictfp {
938 ; CHECK-LABEL: vuitofp_v4i16_v4f16:
940 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
941 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
943 %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
947 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
; strictfp sitofp <4 x i16> -> <4 x float>: one widening vfwcvt.f.x.v, result moved back to v8.
948 define <4 x float> @vsitofp_v4i16_v4f32(<4 x i16> %va) strictfp {
949 ; CHECK-LABEL: vsitofp_v4i16_v4f32:
951 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
952 ; CHECK-NEXT: vfwcvt.f.x.v v9, v8
953 ; CHECK-NEXT: vmv1r.v v8, v9
955 %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
956 ret <4 x float> %evec
959 declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
; strictfp uitofp <4 x i16> -> <4 x float>: one widening vfwcvt.f.xu.v, result moved back to v8.
960 define <4 x float> @vuitofp_v4i16_v4f32(<4 x i16> %va) strictfp {
961 ; CHECK-LABEL: vuitofp_v4i16_v4f32:
963 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
964 ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
965 ; CHECK-NEXT: vmv1r.v v8, v9
967 %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
968 ret <4 x float> %evec
971 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i16(<4 x i16>, metadata, metadata)
; strictfp sitofp <4 x i16> -> <4 x double>: sign-extend e16->e32 (vsext.vf2) then widening convert to f64.
972 define <4 x double> @vsitofp_v4i16_v4f64(<4 x i16> %va) strictfp {
973 ; CHECK-LABEL: vsitofp_v4i16_v4f64:
975 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
976 ; CHECK-NEXT: vsext.vf2 v10, v8
977 ; CHECK-NEXT: vfwcvt.f.x.v v8, v10
979 %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
980 ret <4 x double> %evec
983 declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i16(<4 x i16>, metadata, metadata)
; strictfp uitofp <4 x i16> -> <4 x double>: zero-extend e16->e32 (vzext.vf2) then widening convert to f64.
984 define <4 x double> @vuitofp_v4i16_v4f64(<4 x i16> %va) strictfp {
985 ; CHECK-LABEL: vuitofp_v4i16_v4f64:
987 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
988 ; CHECK-NEXT: vzext.vf2 v10, v8
989 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
991 %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
992 ret <4 x double> %evec
995 declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i16(<8 x i16>, metadata, metadata)
; strictfp sitofp <8 x i16> -> <8 x half>: same element width, single in-place vfcvt.f.x.v at e16/m1.
996 define <8 x half> @vsitofp_v8i16_v8f16(<8 x i16> %va) strictfp {
997 ; CHECK-LABEL: vsitofp_v8i16_v8f16:
999 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
1000 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1002 %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1003 ret <8 x half> %evec
1006 declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i16(<8 x i16>, metadata, metadata)
; strictfp uitofp <8 x i16> -> <8 x half>: same element width, single in-place vfcvt.f.xu.v at e16/m1.
1007 define <8 x half> @vuitofp_v8i16_v8f16(<8 x i16> %va) strictfp {
1008 ; CHECK-LABEL: vuitofp_v8i16_v8f16:
1010 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
1011 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1013 %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1014 ret <8 x half> %evec
1017 declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i16(<8 x i16>, metadata, metadata)
; strictfp sitofp <8 x i16> -> <8 x float>: one widening vfwcvt.f.x.v into a 2-reg group, then vmv2r back to v8.
1018 define <8 x float> @vsitofp_v8i16_v8f32(<8 x i16> %va) strictfp {
1019 ; CHECK-LABEL: vsitofp_v8i16_v8f32:
1021 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
1022 ; CHECK-NEXT: vfwcvt.f.x.v v10, v8
1023 ; CHECK-NEXT: vmv2r.v v8, v10
1025 %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1026 ret <8 x float> %evec
1029 declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i16(<8 x i16>, metadata, metadata)
; strictfp uitofp <8 x i16> -> <8 x float>: one widening vfwcvt.f.xu.v into a 2-reg group, then vmv2r back to v8.
1030 define <8 x float> @vuitofp_v8i16_v8f32(<8 x i16> %va) strictfp {
1031 ; CHECK-LABEL: vuitofp_v8i16_v8f32:
1033 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
1034 ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8
1035 ; CHECK-NEXT: vmv2r.v v8, v10
1037 %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1038 ret <8 x float> %evec
1041 declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i16(<8 x i16>, metadata, metadata)
; strictfp sitofp <8 x i16> -> <8 x double>: sign-extend e16->e32 (vsext.vf2) then widening convert to f64.
1042 define <8 x double> @vsitofp_v8i16_v8f64(<8 x i16> %va) strictfp {
1043 ; CHECK-LABEL: vsitofp_v8i16_v8f64:
1045 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1046 ; CHECK-NEXT: vsext.vf2 v12, v8
1047 ; CHECK-NEXT: vfwcvt.f.x.v v8, v12
1049 %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1050 ret <8 x double> %evec
1053 declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i16(<8 x i16>, metadata, metadata)
; strictfp uitofp <8 x i16> -> <8 x double>: zero-extend e16->e32 (vzext.vf2) then widening convert to f64.
1054 define <8 x double> @vuitofp_v8i16_v8f64(<8 x i16> %va) strictfp {
1055 ; CHECK-LABEL: vuitofp_v8i16_v8f64:
1057 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1058 ; CHECK-NEXT: vzext.vf2 v12, v8
1059 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
1061 %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1062 ret <8 x double> %evec
1065 declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i16(<16 x i16>, metadata, metadata)
; strictfp sitofp <16 x i16> -> <16 x half>: same element width, single in-place vfcvt.f.x.v at e16/m2.
1066 define <16 x half> @vsitofp_v16i16_v16f16(<16 x i16> %va) strictfp {
1067 ; CHECK-LABEL: vsitofp_v16i16_v16f16:
1069 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
1070 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1072 %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1073 ret <16 x half> %evec
1076 declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i16(<16 x i16>, metadata, metadata)
; strictfp uitofp <16 x i16> -> <16 x half>: same element width, single in-place vfcvt.f.xu.v at e16/m2.
1077 define <16 x half> @vuitofp_v16i16_v16f16(<16 x i16> %va) strictfp {
1078 ; CHECK-LABEL: vuitofp_v16i16_v16f16:
1080 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
1081 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1083 %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1084 ret <16 x half> %evec
1087 declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i16(<16 x i16>, metadata, metadata)
; strictfp sitofp <16 x i16> -> <16 x float>: one widening vfwcvt.f.x.v into a 4-reg group, then vmv4r back to v8.
1088 define <16 x float> @vsitofp_v16i16_v16f32(<16 x i16> %va) strictfp {
1089 ; CHECK-LABEL: vsitofp_v16i16_v16f32:
1091 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
1092 ; CHECK-NEXT: vfwcvt.f.x.v v12, v8
1093 ; CHECK-NEXT: vmv4r.v v8, v12
1095 %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1096 ret <16 x float> %evec
1099 declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i16(<16 x i16>, metadata, metadata)
; strictfp uitofp <16 x i16> -> <16 x float>: one widening vfwcvt.f.xu.v into a 4-reg group, then vmv4r back to v8.
1100 define <16 x float> @vuitofp_v16i16_v16f32(<16 x i16> %va) strictfp {
1101 ; CHECK-LABEL: vuitofp_v16i16_v16f32:
1103 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
1104 ; CHECK-NEXT: vfwcvt.f.xu.v v12, v8
1105 ; CHECK-NEXT: vmv4r.v v8, v12
1107 %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1108 ret <16 x float> %evec
1111 declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i16(<32 x i16>, metadata, metadata)
; strictfp sitofp <32 x i16> -> <32 x half>: VL=32 via li/vsetvli; same-width in-place vfcvt.f.x.v at e16/m4.
1112 define <32 x half> @vsitofp_v32i16_v32f16(<32 x i16> %va) strictfp {
1113 ; CHECK-LABEL: vsitofp_v32i16_v32f16:
1115 ; CHECK-NEXT: li a0, 32
1116 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
1117 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1119 %evec = call <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i16(<32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1120 ret <32 x half> %evec
1123 declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i16(<32 x i16>, metadata, metadata)
; strictfp uitofp <32 x i16> -> <32 x half>: VL=32 via li/vsetvli; same-width in-place vfcvt.f.xu.v at e16/m4.
1124 define <32 x half> @vuitofp_v32i16_v32f16(<32 x i16> %va) strictfp {
1125 ; CHECK-LABEL: vuitofp_v32i16_v32f16:
1127 ; CHECK-NEXT: li a0, 32
1128 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
1129 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1131 %evec = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i16(<32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1132 ret <32 x half> %evec
1135 declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i32(<1 x i32>, metadata, metadata)
; strictfp sitofp <1 x i32> -> <1 x half>: single narrowing vfncvt.f.x.w (i32 -> f16), result moved back to v8.
1136 define <1 x half> @vsitofp_v1i32_v1f16(<1 x i32> %va) strictfp {
1137 ; CHECK-LABEL: vsitofp_v1i32_v1f16:
1139 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
1140 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1141 ; CHECK-NEXT: vmv1r.v v8, v9
1143 %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1144 ret <1 x half> %evec
1147 declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i32(<1 x i32>, metadata, metadata)
; strictfp uitofp <1 x i32> -> <1 x half>: single narrowing vfncvt.f.xu.w, result moved back to v8.
1148 define <1 x half> @vuitofp_v1i32_v1f16(<1 x i32> %va) strictfp {
1149 ; CHECK-LABEL: vuitofp_v1i32_v1f16:
1151 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
1152 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1153 ; CHECK-NEXT: vmv1r.v v8, v9
1155 %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1156 ret <1 x half> %evec
1159 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
; strictfp sitofp <1 x i32> -> <1 x float>: same element width, single in-place vfcvt.f.x.v.
1160 define <1 x float> @vsitofp_v1i32_v1f32(<1 x i32> %va) strictfp {
1161 ; CHECK-LABEL: vsitofp_v1i32_v1f32:
1163 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1164 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1166 %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1167 ret <1 x float> %evec
1170 declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
; strictfp uitofp <1 x i32> -> <1 x float>: same element width, single in-place vfcvt.f.xu.v.
1171 define <1 x float> @vuitofp_v1i32_v1f32(<1 x i32> %va) strictfp {
1172 ; CHECK-LABEL: vuitofp_v1i32_v1f32:
1174 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1175 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1177 %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1178 ret <1 x float> %evec
1181 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
; strictfp sitofp <1 x i32> -> <1 x double>: one widening vfwcvt.f.x.v, result moved back to v8.
1182 define <1 x double> @vsitofp_v1i32_v1f64(<1 x i32> %va) strictfp {
1183 ; CHECK-LABEL: vsitofp_v1i32_v1f64:
1185 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1186 ; CHECK-NEXT: vfwcvt.f.x.v v9, v8
1187 ; CHECK-NEXT: vmv1r.v v8, v9
1189 %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1190 ret <1 x double> %evec
1193 declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
; strictfp uitofp <1 x i32> -> <1 x double>: one widening vfwcvt.f.xu.v, result moved back to v8.
1194 define <1 x double> @vuitofp_v1i32_v1f64(<1 x i32> %va) strictfp {
1195 ; CHECK-LABEL: vuitofp_v1i32_v1f64:
1197 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1198 ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
1199 ; CHECK-NEXT: vmv1r.v v8, v9
1201 %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1202 ret <1 x double> %evec
1205 declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i32(<2 x i32>, metadata, metadata)
; strictfp sitofp <2 x i32> -> <2 x half>: single narrowing vfncvt.f.x.w, result moved back to v8.
1206 define <2 x half> @vsitofp_v2i32_v2f16(<2 x i32> %va) strictfp {
1207 ; CHECK-LABEL: vsitofp_v2i32_v2f16:
1209 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
1210 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1211 ; CHECK-NEXT: vmv1r.v v8, v9
1213 %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1214 ret <2 x half> %evec
1217 declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i32(<2 x i32>, metadata, metadata)
; strictfp uitofp <2 x i32> -> <2 x half>: single narrowing vfncvt.f.xu.w, result moved back to v8.
1218 define <2 x half> @vuitofp_v2i32_v2f16(<2 x i32> %va) strictfp {
1219 ; CHECK-LABEL: vuitofp_v2i32_v2f16:
1221 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
1222 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1223 ; CHECK-NEXT: vmv1r.v v8, v9
1225 %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1226 ret <2 x half> %evec
1229 declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
; strictfp sitofp <2 x i32> -> <2 x float>: same element width, single in-place vfcvt.f.x.v.
1230 define <2 x float> @vsitofp_v2i32_v2f32(<2 x i32> %va) strictfp {
1231 ; CHECK-LABEL: vsitofp_v2i32_v2f32:
1233 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1234 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1236 %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1237 ret <2 x float> %evec
1240 declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
; strictfp uitofp <2 x i32> -> <2 x float>: same element width, single in-place vfcvt.f.xu.v.
1241 define <2 x float> @vuitofp_v2i32_v2f32(<2 x i32> %va) strictfp {
1242 ; CHECK-LABEL: vuitofp_v2i32_v2f32:
1244 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1245 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1247 %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1248 ret <2 x float> %evec
1251 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
; strictfp sitofp <2 x i32> -> <2 x double>: one widening vfwcvt.f.x.v, result moved back to v8.
1252 define <2 x double> @vsitofp_v2i32_v2f64(<2 x i32> %va) strictfp {
1253 ; CHECK-LABEL: vsitofp_v2i32_v2f64:
1255 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1256 ; CHECK-NEXT: vfwcvt.f.x.v v9, v8
1257 ; CHECK-NEXT: vmv1r.v v8, v9
1259 %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1260 ret <2 x double> %evec
1263 declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
; strictfp uitofp <2 x i32> -> <2 x double>: one widening vfwcvt.f.xu.v, result moved back to v8.
1264 define <2 x double> @vuitofp_v2i32_v2f64(<2 x i32> %va) strictfp {
1265 ; CHECK-LABEL: vuitofp_v2i32_v2f64:
1267 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1268 ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
1269 ; CHECK-NEXT: vmv1r.v v8, v9
1271 %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1272 ret <2 x double> %evec
1275 declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i32(<4 x i32>, metadata, metadata)
; strictfp sitofp <4 x i32> -> <4 x half>: single narrowing vfncvt.f.x.w, result moved back to v8.
1276 define <4 x half> @vsitofp_v4i32_v4f16(<4 x i32> %va) strictfp {
1277 ; CHECK-LABEL: vsitofp_v4i32_v4f16:
1279 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
1280 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1281 ; CHECK-NEXT: vmv1r.v v8, v9
1283 %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1284 ret <4 x half> %evec
1287 declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i32(<4 x i32>, metadata, metadata)
; strictfp uitofp <4 x i32> -> <4 x half>: single narrowing vfncvt.f.xu.w, result moved back to v8.
1288 define <4 x half> @vuitofp_v4i32_v4f16(<4 x i32> %va) strictfp {
1289 ; CHECK-LABEL: vuitofp_v4i32_v4f16:
1291 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
1292 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1293 ; CHECK-NEXT: vmv1r.v v8, v9
1295 %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1296 ret <4 x half> %evec
1299 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
; strictfp sitofp <4 x i32> -> <4 x float>: same element width, single in-place vfcvt.f.x.v at e32/m1.
1300 define <4 x float> @vsitofp_v4i32_v4f32(<4 x i32> %va) strictfp {
1301 ; CHECK-LABEL: vsitofp_v4i32_v4f32:
1303 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1304 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1306 %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1307 ret <4 x float> %evec
1310 declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
; strictfp uitofp <4 x i32> -> <4 x float>: same element width, single in-place vfcvt.f.xu.v at e32/m1.
1311 define <4 x float> @vuitofp_v4i32_v4f32(<4 x i32> %va) strictfp {
1312 ; CHECK-LABEL: vuitofp_v4i32_v4f32:
1314 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1315 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1317 %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1318 ret <4 x float> %evec
1321 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
; strictfp sitofp <4 x i32> -> <4 x double>: one widening vfwcvt.f.x.v into a 2-reg group, then vmv2r back to v8.
1322 define <4 x double> @vsitofp_v4i32_v4f64(<4 x i32> %va) strictfp {
1323 ; CHECK-LABEL: vsitofp_v4i32_v4f64:
1325 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1326 ; CHECK-NEXT: vfwcvt.f.x.v v10, v8
1327 ; CHECK-NEXT: vmv2r.v v8, v10
1329 %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1330 ret <4 x double> %evec
1333 declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
; strictfp uitofp <4 x i32> -> <4 x double>: one widening vfwcvt.f.xu.v into a 2-reg group, then vmv2r back to v8.
1334 define <4 x double> @vuitofp_v4i32_v4f64(<4 x i32> %va) strictfp {
1335 ; CHECK-LABEL: vuitofp_v4i32_v4f64:
1337 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1338 ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8
1339 ; CHECK-NEXT: vmv2r.v v8, v10
1341 %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1342 ret <4 x double> %evec
1345 declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i32(<8 x i32>, metadata, metadata)
; strictfp sitofp <8 x i32> -> <8 x half>: single narrowing vfncvt.f.x.w; vmv.v.v (VL-aware move) copies the result into v8.
1346 define <8 x half> @vsitofp_v8i32_v8f16(<8 x i32> %va) strictfp {
1347 ; CHECK-LABEL: vsitofp_v8i32_v8f16:
1349 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
1350 ; CHECK-NEXT: vfncvt.f.x.w v10, v8
1351 ; CHECK-NEXT: vmv.v.v v8, v10
1353 %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1354 ret <8 x half> %evec
1357 declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32>, metadata, metadata)
; strictfp uitofp <8 x i32> -> <8 x half>: single narrowing vfncvt.f.xu.w; vmv.v.v copies the result into v8.
1358 define <8 x half> @vuitofp_v8i32_v8f16(<8 x i32> %va) strictfp {
1359 ; CHECK-LABEL: vuitofp_v8i32_v8f16:
1361 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
1362 ; CHECK-NEXT: vfncvt.f.xu.w v10, v8
1363 ; CHECK-NEXT: vmv.v.v v8, v10
1365 %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1366 ret <8 x half> %evec
1369 declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i32(<8 x i32>, metadata, metadata)
; strictfp sitofp <8 x i32> -> <8 x float>: same element width, single in-place vfcvt.f.x.v at e32/m2.
1370 define <8 x float> @vsitofp_v8i32_v8f32(<8 x i32> %va) strictfp {
1371 ; CHECK-LABEL: vsitofp_v8i32_v8f32:
1373 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1374 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1376 %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1377 ret <8 x float> %evec
1380 declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i32(<8 x i32>, metadata, metadata)
; strictfp uitofp <8 x i32> -> <8 x float>: same element width, single in-place vfcvt.f.xu.v at e32/m2.
1381 define <8 x float> @vuitofp_v8i32_v8f32(<8 x i32> %va) strictfp {
1382 ; CHECK-LABEL: vuitofp_v8i32_v8f32:
1384 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1385 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1387 %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1388 ret <8 x float> %evec
1391 declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(<8 x i32>, metadata, metadata)
; strictfp sitofp <8 x i32> -> <8 x double>: one widening vfwcvt.f.x.v into a 4-reg group, then vmv4r back to v8.
1392 define <8 x double> @vsitofp_v8i32_v8f64(<8 x i32> %va) strictfp {
1393 ; CHECK-LABEL: vsitofp_v8i32_v8f64:
1395 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1396 ; CHECK-NEXT: vfwcvt.f.x.v v12, v8
1397 ; CHECK-NEXT: vmv4r.v v8, v12
1399 %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1400 ret <8 x double> %evec
1403 declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i32(<8 x i32>, metadata, metadata)
; strictfp uitofp <8 x i32> -> <8 x double>: one widening vfwcvt.f.xu.v into a 4-reg group, then vmv4r back to v8.
1404 define <8 x double> @vuitofp_v8i32_v8f64(<8 x i32> %va) strictfp {
1405 ; CHECK-LABEL: vuitofp_v8i32_v8f64:
1407 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1408 ; CHECK-NEXT: vfwcvt.f.xu.v v12, v8
1409 ; CHECK-NEXT: vmv4r.v v8, v12
1411 %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1412 ret <8 x double> %evec
1415 declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i32(<16 x i32>, metadata, metadata)
; strictfp sitofp <16 x i32> -> <16 x half>: single narrowing vfncvt.f.x.w; vmv.v.v copies the result into v8.
1416 define <16 x half> @vsitofp_v16i32_v16f16(<16 x i32> %va) strictfp {
1417 ; CHECK-LABEL: vsitofp_v16i32_v16f16:
1419 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
1420 ; CHECK-NEXT: vfncvt.f.x.w v12, v8
1421 ; CHECK-NEXT: vmv.v.v v8, v12
1423 %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1424 ret <16 x half> %evec
1427 declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i32(<16 x i32>, metadata, metadata)
; strictfp uitofp <16 x i32> -> <16 x half>: single narrowing vfncvt.f.xu.w; vmv.v.v copies the result into v8.
1428 define <16 x half> @vuitofp_v16i32_v16f16(<16 x i32> %va) strictfp {
1429 ; CHECK-LABEL: vuitofp_v16i32_v16f16:
1431 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
1432 ; CHECK-NEXT: vfncvt.f.xu.w v12, v8
1433 ; CHECK-NEXT: vmv.v.v v8, v12
1435 %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1436 ret <16 x half> %evec
1439 declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(<16 x i32>, metadata, metadata)
; strictfp sitofp <16 x i32> -> <16 x float>: same element width, single in-place vfcvt.f.x.v at e32/m4.
1440 define <16 x float> @vsitofp_v16i32_v16f32(<16 x i32> %va) strictfp {
1441 ; CHECK-LABEL: vsitofp_v16i32_v16f32:
1443 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
1444 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1446 %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1447 ret <16 x float> %evec
1450 declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i32(<16 x i32>, metadata, metadata)
; strictfp uitofp <16 x i32> -> <16 x float>: same element width, single in-place vfcvt.f.xu.v at e32/m4.
1451 define <16 x float> @vuitofp_v16i32_v16f32(<16 x i32> %va) strictfp {
1452 ; CHECK-LABEL: vuitofp_v16i32_v16f32:
1454 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
1455 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1457 %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1458 ret <16 x float> %evec
1461 declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i64(<1 x i64>, metadata, metadata)
; strictfp sitofp <1 x i64> -> <1 x half>: two-step narrowing — vfncvt.f.x.w (i64->f32) then vfncvt.f.f.w (f32->f16).
1462 define <1 x half> @vsitofp_v1i64_v1f16(<1 x i64> %va) strictfp {
1463 ; CHECK-LABEL: vsitofp_v1i64_v1f16:
1465 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1466 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1467 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
1468 ; CHECK-NEXT: vfncvt.f.f.w v8, v9
1470 %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1471 ret <1 x half> %evec
1474 declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i64(<1 x i64>, metadata, metadata)
; strictfp uitofp <1 x i64> -> <1 x half>: two-step narrowing — vfncvt.f.xu.w (i64->f32) then vfncvt.f.f.w (f32->f16).
1475 define <1 x half> @vuitofp_v1i64_v1f16(<1 x i64> %va) strictfp {
1476 ; CHECK-LABEL: vuitofp_v1i64_v1f16:
1478 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1479 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1480 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
1481 ; CHECK-NEXT: vfncvt.f.f.w v8, v9
1483 %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1484 ret <1 x half> %evec
1487 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i64(<1 x i64>, metadata, metadata)
; strictfp sitofp <1 x i64> -> <1 x float>: single narrowing vfncvt.f.x.w, result moved back to v8.
1488 define <1 x float> @vsitofp_v1i64_v1f32(<1 x i64> %va) strictfp {
1489 ; CHECK-LABEL: vsitofp_v1i64_v1f32:
1491 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1492 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1493 ; CHECK-NEXT: vmv1r.v v8, v9
1495 %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1496 ret <1 x float> %evec
; Unsigned i64 -> f32: same shape as the signed case, using vfncvt.f.xu.w.
1499 declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i64(<1 x i64>, metadata, metadata)
1500 define <1 x float> @vuitofp_v1i64_v1f32(<1 x i64> %va) strictfp {
1501 ; CHECK-LABEL: vuitofp_v1i64_v1f32:
1503 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1504 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1505 ; CHECK-NEXT: vmv1r.v v8, v9
1507 %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1508 ret <1 x float> %evec
; Signed i64 -> f64 (same element width): single in-place vfcvt.f.x.v at
; e64/m1 — no narrowing or register move required.
1511 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
1512 define <1 x double> @vsitofp_v1i64_v1f64(<1 x i64> %va) strictfp {
1513 ; CHECK-LABEL: vsitofp_v1i64_v1f64:
1515 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
1516 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1518 %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1519 ret <1 x double> %evec
; Unsigned i64 -> f64 (same element width): single in-place vfcvt.f.xu.v.
1522 declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
1523 define <1 x double> @vuitofp_v1i64_v1f64(<1 x i64> %va) strictfp {
1524 ; CHECK-LABEL: vuitofp_v1i64_v1f64:
1526 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
1527 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1529 %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1530 ret <1 x double> %evec
; 2-element signed i64 -> f16: two-step narrowing as in the v1 case
; (vfncvt.f.x.w at e32/mf2, then vfncvt.f.f.w at e16/mf4), VL=2.
1534 declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i64(<2 x i64>, metadata, metadata)
1535 define <2 x half> @vsitofp_v2i64_v2f16(<2 x i64> %va) strictfp {
1536 ; CHECK-LABEL: vsitofp_v2i64_v2f16:
1538 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1539 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1540 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
1541 ; CHECK-NEXT: vfncvt.f.f.w v8, v9
1543 %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1544 ret <2 x half> %evec
; 2-element unsigned i64 -> f16: vfncvt.f.xu.w then vfncvt.f.f.w, VL=2.
1547 declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i64(<2 x i64>, metadata, metadata)
1548 define <2 x half> @vuitofp_v2i64_v2f16(<2 x i64> %va) strictfp {
1549 ; CHECK-LABEL: vuitofp_v2i64_v2f16:
1551 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1552 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1553 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
1554 ; CHECK-NEXT: vfncvt.f.f.w v8, v9
1556 %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1557 ret <2 x half> %evec
; 2-element signed i64 -> f32: one narrowing convert plus a whole-register
; move into the v8 return register.
1560 declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
1561 define <2 x float> @vsitofp_v2i64_v2f32(<2 x i64> %va) strictfp {
1562 ; CHECK-LABEL: vsitofp_v2i64_v2f32:
1564 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1565 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1566 ; CHECK-NEXT: vmv1r.v v8, v9
1568 %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1569 ret <2 x float> %evec
; 2-element unsigned i64 -> f32: vfncvt.f.xu.w plus whole-register move.
1572 declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
1573 define <2 x float> @vuitofp_v2i64_v2f32(<2 x i64> %va) strictfp {
1574 ; CHECK-LABEL: vuitofp_v2i64_v2f32:
1576 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1577 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1578 ; CHECK-NEXT: vmv1r.v v8, v9
1580 %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1581 ret <2 x float> %evec
; 2-element signed i64 -> f64 (same width): single in-place vfcvt.f.x.v.
1584 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
1585 define <2 x double> @vsitofp_v2i64_v2f64(<2 x i64> %va) strictfp {
1586 ; CHECK-LABEL: vsitofp_v2i64_v2f64:
1588 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
1589 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1591 %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1592 ret <2 x double> %evec
; 2-element unsigned i64 -> f64 (same width): single in-place vfcvt.f.xu.v.
1595 declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
1596 define <2 x double> @vuitofp_v2i64_v2f64(<2 x i64> %va) strictfp {
1597 ; CHECK-LABEL: vuitofp_v2i64_v2f64:
1599 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
1600 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1602 %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1603 ret <2 x double> %evec
; 4-element signed i64 -> f16: source is LMUL=2 (v8-v9), so the first
; narrowing step lands at e32/m1 (v10) and the second at e16/mf2.
1606 declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i64(<4 x i64>, metadata, metadata)
1607 define <4 x half> @vsitofp_v4i64_v4f16(<4 x i64> %va) strictfp {
1608 ; CHECK-LABEL: vsitofp_v4i64_v4f16:
1610 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1611 ; CHECK-NEXT: vfncvt.f.x.w v10, v8
1612 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
1613 ; CHECK-NEXT: vfncvt.f.f.w v8, v10
1615 %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1616 ret <4 x half> %evec
; 4-element unsigned i64 -> f16: vfncvt.f.xu.w then vfncvt.f.f.w.
1619 declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i64(<4 x i64>, metadata, metadata)
1620 define <4 x half> @vuitofp_v4i64_v4f16(<4 x i64> %va) strictfp {
1621 ; CHECK-LABEL: vuitofp_v4i64_v4f16:
1623 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1624 ; CHECK-NEXT: vfncvt.f.xu.w v10, v8
1625 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
1626 ; CHECK-NEXT: vfncvt.f.f.w v8, v10
1628 %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1629 ret <4 x half> %evec
; 4-element signed i64 -> f32: narrow into v10, then vmv.v.v (VL-aware
; move, rather than vmv1r.v) into the v8 return register.
1632 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
1633 define <4 x float> @vsitofp_v4i64_v4f32(<4 x i64> %va) strictfp {
1634 ; CHECK-LABEL: vsitofp_v4i64_v4f32:
1636 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1637 ; CHECK-NEXT: vfncvt.f.x.w v10, v8
1638 ; CHECK-NEXT: vmv.v.v v8, v10
1640 %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1641 ret <4 x float> %evec
; 4-element unsigned i64 -> f32: vfncvt.f.xu.w plus vmv.v.v move.
1644 declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
1645 define <4 x float> @vuitofp_v4i64_v4f32(<4 x i64> %va) strictfp {
1646 ; CHECK-LABEL: vuitofp_v4i64_v4f32:
1648 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1649 ; CHECK-NEXT: vfncvt.f.xu.w v10, v8
1650 ; CHECK-NEXT: vmv.v.v v8, v10
1652 %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1653 ret <4 x float> %evec
; 4-element signed i64 -> f64 (same width): in-place vfcvt.f.x.v at e64/m2.
1656 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64>, metadata, metadata)
1657 define <4 x double> @vsitofp_v4i64_v4f64(<4 x i64> %va) strictfp {
1658 ; CHECK-LABEL: vsitofp_v4i64_v4f64:
1660 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
1661 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1663 %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1664 ret <4 x double> %evec
; 4-element unsigned i64 -> f64 (same width): in-place vfcvt.f.xu.v at e64/m2.
1667 declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i64(<4 x i64>, metadata, metadata)
1668 define <4 x double> @vuitofp_v4i64_v4f64(<4 x i64> %va) strictfp {
1669 ; CHECK-LABEL: vuitofp_v4i64_v4f64:
1671 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
1672 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1674 %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1675 ret <4 x double> %evec
; 8-element signed i64 -> f16: source is LMUL=4 (v8-v11); first narrowing
; step at e32/m2 into v12, second at e16/m1 back into v8.
1678 declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i64(<8 x i64>, metadata, metadata)
1679 define <8 x half> @vsitofp_v8i64_v8f16(<8 x i64> %va) strictfp {
1680 ; CHECK-LABEL: vsitofp_v8i64_v8f16:
1682 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1683 ; CHECK-NEXT: vfncvt.f.x.w v12, v8
1684 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
1685 ; CHECK-NEXT: vfncvt.f.f.w v8, v12
1687 %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1688 ret <8 x half> %evec
; 8-element unsigned i64 -> f16: vfncvt.f.xu.w then vfncvt.f.f.w.
1691 declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i64(<8 x i64>, metadata, metadata)
1692 define <8 x half> @vuitofp_v8i64_v8f16(<8 x i64> %va) strictfp {
1693 ; CHECK-LABEL: vuitofp_v8i64_v8f16:
1695 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1696 ; CHECK-NEXT: vfncvt.f.xu.w v12, v8
1697 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
1698 ; CHECK-NEXT: vfncvt.f.f.w v8, v12
1700 %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1701 ret <8 x half> %evec
; 8-element signed i64 -> f32: one narrowing convert into v12, then vmv.v.v
; into the v8 return register group.
1704 declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64>, metadata, metadata)
1705 define <8 x float> @vsitofp_v8i64_v8f32(<8 x i64> %va) strictfp {
1706 ; CHECK-LABEL: vsitofp_v8i64_v8f32:
1708 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1709 ; CHECK-NEXT: vfncvt.f.x.w v12, v8
1710 ; CHECK-NEXT: vmv.v.v v8, v12
1712 %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1713 ret <8 x float> %evec
; 8-element unsigned i64 -> f32: vfncvt.f.xu.w plus vmv.v.v move.
1716 declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64>, metadata, metadata)
1717 define <8 x float> @vuitofp_v8i64_v8f32(<8 x i64> %va) strictfp {
1718 ; CHECK-LABEL: vuitofp_v8i64_v8f32:
1720 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1721 ; CHECK-NEXT: vfncvt.f.xu.w v12, v8
1722 ; CHECK-NEXT: vmv.v.v v8, v12
1724 %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1725 ret <8 x float> %evec
; 8-element signed i64 -> f64 (same width): in-place vfcvt.f.x.v at e64/m4.
1728 declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64>, metadata, metadata)
1729 define <8 x double> @vsitofp_v8i64_v8f64(<8 x i64> %va) strictfp {
1730 ; CHECK-LABEL: vsitofp_v8i64_v8f64:
1732 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
1733 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1735 %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1736 ret <8 x double> %evec
; 8-element unsigned i64 -> f64 (same width): in-place vfcvt.f.xu.v at e64/m4.
1739 declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64>, metadata, metadata)
1740 define <8 x double> @vuitofp_v8i64_v8f64(<8 x i64> %va) strictfp {
1741 ; CHECK-LABEL: vuitofp_v8i64_v8f64:
1743 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
1744 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1746 %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1747 ret <8 x double> %evec