; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i1(<1 x i1>, metadata, metadata)
define <1 x half> @vsitofp_v1i1_v1f16(<1 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i1_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i1(<1 x i1>, metadata, metadata)
define <1 x half> @vuitofp_v1i1_v1f16(<1 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i1_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i1(<1 x i1>, metadata, metadata)
define <1 x float> @vsitofp_v1i1_v1f32(<1 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i1_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i1(<1 x i1>, metadata, metadata)
define <1 x float> @vuitofp_v1i1_v1f32(<1 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i1_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i1(<1 x i1>, metadata, metadata)
define <1 x double> @vsitofp_v1i1_v1f64(<1 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i1_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}

declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i1(<1 x i1>, metadata, metadata)
define <1 x double> @vuitofp_v1i1_v1f64(<1 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i1_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}
declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i1(<2 x i1>, metadata, metadata)
define <2 x half> @vsitofp_v2i1_v2f16(<2 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i1_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i1(<2 x i1>, metadata, metadata)
define <2 x half> @vuitofp_v2i1_v2f16(<2 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i1_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i1(<2 x i1>, metadata, metadata)
define <2 x float> @vsitofp_v2i1_v2f32(<2 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i1_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i1(<2 x i1>, metadata, metadata)
define <2 x float> @vuitofp_v2i1_v2f32(<2 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i1_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i1(<2 x i1>, metadata, metadata)
define <2 x double> @vsitofp_v2i1_v2f64(<2 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i1_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}

declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i1(<2 x i1>, metadata, metadata)
define <2 x double> @vuitofp_v2i1_v2f64(<2 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i1_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}
declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i1(<4 x i1>, metadata, metadata)
define <4 x half> @vsitofp_v4i1_v4f16(<4 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i1_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i1(<4 x i1>, metadata, metadata)
define <4 x half> @vuitofp_v4i1_v4f16(<4 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i1_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
define <4 x float> @vsitofp_v4i1_v4f32(<4 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i1_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
define <4 x float> @vuitofp_v4i1_v4f32(<4 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i1_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i1(<4 x i1>, metadata, metadata)
define <4 x double> @vsitofp_v4i1_v4f64(<4 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i1_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}

declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i1(<4 x i1>, metadata, metadata)
define <4 x double> @vuitofp_v4i1_v4f64(<4 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i1_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}
declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i1(<8 x i1>, metadata, metadata)
define <8 x half> @vsitofp_v8i1_v8f16(<8 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i1_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i1(<8 x i1>, metadata, metadata)
define <8 x half> @vuitofp_v8i1_v8f16(<8 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i1_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i1(<8 x i1>, metadata, metadata)
define <8 x float> @vsitofp_v8i1_v8f32(<8 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i1_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i1(<8 x i1>, metadata, metadata)
define <8 x float> @vuitofp_v8i1_v8f32(<8 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i1_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i1(<8 x i1>, metadata, metadata)
define <8 x double> @vsitofp_v8i1_v8f64(<8 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i1_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}

declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i1(<8 x i1>, metadata, metadata)
define <8 x double> @vuitofp_v8i1_v8f64(<8 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i1_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}
declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i1(<16 x i1>, metadata, metadata)
define <16 x half> @vsitofp_v16i1_v16f16(<16 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i1_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1>, metadata, metadata)
define <16 x half> @vuitofp_v16i1_v16f16(<16 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i1_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i1(<16 x i1>, metadata, metadata)
define <16 x float> @vsitofp_v16i1_v16f32(<16 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i1_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}

declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i1(<16 x i1>, metadata, metadata)
define <16 x float> @vuitofp_v16i1_v16f32(<16 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i1_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}
declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i1(<32 x i1>, metadata, metadata)
define <32 x half> @vsitofp_v32i1_v32f16(<32 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v32i1_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i1(<32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <32 x half> %evec
}

declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i1(<32 x i1>, metadata, metadata)
define <32 x half> @vuitofp_v32i1_v32f16(<32 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v32i1_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i1(<32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <32 x half> %evec
}
declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i8(<1 x i8>, metadata, metadata)
define <1 x half> @vsitofp_v1i8_v1f16(<1 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i8_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i7(<1 x i7>, metadata, metadata)
define <1 x half> @vsitofp_v1i7_v1f16(<1 x i7> %va) strictfp {
; RV32-LABEL: vsitofp_v1i7_v1f16:
; RV32:       # %bb.0:
; RV32-NEXT:    slli a0, a0, 25
; RV32-NEXT:    srai a0, a0, 25
; RV32-NEXT:    fcvt.h.w fa5, a0
; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; RV32-NEXT:    vfmv.s.f v8, fa5
; RV32-NEXT:    ret
;
; RV64-LABEL: vsitofp_v1i7_v1f16:
; RV64:       # %bb.0:
; RV64-NEXT:    slli a0, a0, 57
; RV64-NEXT:    srai a0, a0, 57
; RV64-NEXT:    fcvt.h.w fa5, a0
; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; RV64-NEXT:    vfmv.s.f v8, fa5
; RV64-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i7(<1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i7(<1 x i7>, metadata, metadata)
define <1 x half> @vuitofp_v1i7_v1f16(<1 x i7> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i7_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    andi a0, a0, 127
; CHECK-NEXT:    fcvt.h.wu fa5, a0
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa5
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i7(<1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i8(<1 x i8>, metadata, metadata)
define <1 x half> @vuitofp_v1i8_v1f16(<1 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i8_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i8(<1 x i8>, metadata, metadata)
define <1 x float> @vsitofp_v1i8_v1f32(<1 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i8_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i8(<1 x i8>, metadata, metadata)
define <1 x float> @vuitofp_v1i8_v1f32(<1 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i8_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i8(<1 x i8>, metadata, metadata)
define <1 x double> @vsitofp_v1i8_v1f64(<1 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i8_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}

declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i8(<1 x i8>, metadata, metadata)
define <1 x double> @vuitofp_v1i8_v1f64(<1 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i8_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}
declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i8(<2 x i8>, metadata, metadata)
define <2 x half> @vsitofp_v2i8_v2f16(<2 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i8_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i8(<2 x i8>, metadata, metadata)
define <2 x half> @vuitofp_v2i8_v2f16(<2 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i8_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i8(<2 x i8>, metadata, metadata)
define <2 x float> @vsitofp_v2i8_v2f32(<2 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i8_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i8(<2 x i8>, metadata, metadata)
define <2 x float> @vuitofp_v2i8_v2f32(<2 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i8_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i8(<2 x i8>, metadata, metadata)
define <2 x double> @vsitofp_v2i8_v2f64(<2 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i8_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}

declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i8(<2 x i8>, metadata, metadata)
define <2 x double> @vuitofp_v2i8_v2f64(<2 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i8_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}
declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i8(<4 x i8>, metadata, metadata)
define <4 x half> @vsitofp_v4i8_v4f16(<4 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i8_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i8(<4 x i8>, metadata, metadata)
define <4 x half> @vuitofp_v4i8_v4f16(<4 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i8_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8>, metadata, metadata)
define <4 x float> @vsitofp_v4i8_v4f32(<4 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i8_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i8(<4 x i8>, metadata, metadata)
define <4 x float> @vuitofp_v4i8_v4f32(<4 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i8_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i8(<4 x i8>, metadata, metadata)
define <4 x double> @vsitofp_v4i8_v4f64(<4 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i8_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vsext.vf4 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}

declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i8(<4 x i8>, metadata, metadata)
define <4 x double> @vuitofp_v4i8_v4f64(<4 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i8_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vzext.vf4 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}
declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i8(<8 x i8>, metadata, metadata)
define <8 x half> @vsitofp_v8i8_v8f16(<8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i8_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i8(<8 x i8>, metadata, metadata)
define <8 x half> @vuitofp_v8i8_v8f16(<8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i8_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i8(<8 x i8>, metadata, metadata)
define <8 x float> @vsitofp_v8i8_v8f32(<8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i8_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vsext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i8(<8 x i8>, metadata, metadata)
define <8 x float> @vuitofp_v8i8_v8f32(<8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i8_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i8(<8 x i8>, metadata, metadata)
define <8 x double> @vsitofp_v8i8_v8f64(<8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i8_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vsext.vf4 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}

declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i8(<8 x i8>, metadata, metadata)
define <8 x double> @vuitofp_v8i8_v8f64(<8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i8_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vzext.vf4 v12, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}
declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i8(<16 x i8>, metadata, metadata)
define <16 x half> @vsitofp_v16i8_v16f16(<16 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i8_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8>, metadata, metadata)
define <16 x half> @vuitofp_v16i8_v16f16(<16 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i8_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i8(<16 x i8>, metadata, metadata)
define <16 x float> @vsitofp_v16i8_v16f32(<16 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i8_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}

declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i8(<16 x i8>, metadata, metadata)
define <16 x float> @vuitofp_v16i8_v16f32(<16 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i8_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}
771 declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i8(<32 x i8>, metadata, metadata)
772 define <32 x half> @vsitofp_v32i8_v32f16(<32 x i8> %va) strictfp {
773 ; CHECK-LABEL: vsitofp_v32i8_v32f16:
775 ; CHECK-NEXT: li a0, 32
776 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
777 ; CHECK-NEXT: vfwcvt.f.x.v v12, v8
778 ; CHECK-NEXT: vmv4r.v v8, v12
780 %evec = call <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i8(<32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
781 ret <32 x half> %evec
784 declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i8(<32 x i8>, metadata, metadata)
785 define <32 x half> @vuitofp_v32i8_v32f16(<32 x i8> %va) strictfp {
786 ; CHECK-LABEL: vuitofp_v32i8_v32f16:
788 ; CHECK-NEXT: li a0, 32
789 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
790 ; CHECK-NEXT: vfwcvt.f.xu.v v12, v8
791 ; CHECK-NEXT: vmv4r.v v8, v12
793 %evec = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i8(<32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
794 ret <32 x half> %evec
; i16 -> f16/f32/f64 strict-fp conversions. Same-width uses vfcvt,
; one-step widening uses vfwcvt, i16->f64 goes via vsext/vzext.vf2 to i32.
declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i16(<1 x i16>, metadata, metadata)
define <1 x half> @vsitofp_v1i16_v1f16(<1 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i16_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i16(<1 x i16>, metadata, metadata)
define <1 x half> @vuitofp_v1i16_v1f16(<1 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i16_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i16(<1 x i16>, metadata, metadata)
define <1 x float> @vsitofp_v1i16_v1f32(<1 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i16_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i16(<1 x i16>, metadata, metadata)
define <1 x float> @vuitofp_v1i16_v1f32(<1 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i16_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i16(<1 x i16>, metadata, metadata)
define <1 x double> @vsitofp_v1i16_v1f64(<1 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i16_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}

declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i16(<1 x i16>, metadata, metadata)
define <1 x double> @vuitofp_v1i16_v1f64(<1 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i16_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}

declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i16(<2 x i16>, metadata, metadata)
define <2 x half> @vsitofp_v2i16_v2f16(<2 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i16_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i16(<2 x i16>, metadata, metadata)
define <2 x half> @vuitofp_v2i16_v2f16(<2 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i16_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i16(<2 x i16>, metadata, metadata)
define <2 x float> @vsitofp_v2i16_v2f32(<2 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i16_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i16(<2 x i16>, metadata, metadata)
define <2 x float> @vuitofp_v2i16_v2f32(<2 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i16_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
define <2 x double> @vsitofp_v2i16_v2f64(<2 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i16_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}

declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
define <2 x double> @vuitofp_v2i16_v2f64(<2 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i16_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}

declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i16(<4 x i16>, metadata, metadata)
define <4 x half> @vsitofp_v4i16_v4f16(<4 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i16_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i16(<4 x i16>, metadata, metadata)
define <4 x half> @vuitofp_v4i16_v4f16(<4 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i16_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
define <4 x float> @vsitofp_v4i16_v4f32(<4 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i16_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
define <4 x float> @vuitofp_v4i16_v4f32(<4 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i16_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i16(<4 x i16>, metadata, metadata)
define <4 x double> @vsitofp_v4i16_v4f64(<4 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i16_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vsext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}

declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i16(<4 x i16>, metadata, metadata)
define <4 x double> @vuitofp_v4i16_v4f64(<4 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i16_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}

declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i16(<8 x i16>, metadata, metadata)
define <8 x half> @vsitofp_v8i16_v8f16(<8 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i16_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i16(<8 x i16>, metadata, metadata)
define <8 x half> @vuitofp_v8i16_v8f16(<8 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i16_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i16(<8 x i16>, metadata, metadata)
define <8 x float> @vsitofp_v8i16_v8f32(<8 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i16_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i16(<8 x i16>, metadata, metadata)
define <8 x float> @vuitofp_v8i16_v8f32(<8 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i16_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i16(<8 x i16>, metadata, metadata)
define <8 x double> @vsitofp_v8i16_v8f64(<8 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i16_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}

declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i16(<8 x i16>, metadata, metadata)
define <8 x double> @vuitofp_v8i16_v8f64(<8 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i16_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}

declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i16(<16 x i16>, metadata, metadata)
define <16 x half> @vsitofp_v16i16_v16f16(<16 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i16_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i16(<16 x i16>, metadata, metadata)
define <16 x half> @vuitofp_v16i16_v16f16(<16 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i16_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i16(<16 x i16>, metadata, metadata)
define <16 x float> @vsitofp_v16i16_v16f32(<16 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i16_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}

declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i16(<16 x i16>, metadata, metadata)
define <16 x float> @vuitofp_v16i16_v16f32(<16 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i16_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}

declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i16(<32 x i16>, metadata, metadata)
define <32 x half> @vsitofp_v32i16_v32f16(<32 x i16> %va) strictfp {
; CHECK-LABEL: vsitofp_v32i16_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i16(<32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <32 x half> %evec
}

declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i16(<32 x i16>, metadata, metadata)
define <32 x half> @vuitofp_v32i16_v32f16(<32 x i16> %va) strictfp {
; CHECK-LABEL: vuitofp_v32i16_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i16(<32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <32 x half> %evec
}

; i32 -> f16/f32/f64 strict-fp conversions. Narrowing to f16 uses
; vfncvt.f.x[u].w, same-width uses vfcvt, widening to f64 uses vfwcvt.
declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i32(<1 x i32>, metadata, metadata)
define <1 x half> @vsitofp_v1i32_v1f16(<1 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i32_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i32(<1 x i32>, metadata, metadata)
define <1 x half> @vuitofp_v1i32_v1f16(<1 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i32_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
define <1 x float> @vsitofp_v1i32_v1f32(<1 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i32_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
define <1 x float> @vuitofp_v1i32_v1f32(<1 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i32_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
define <1 x double> @vsitofp_v1i32_v1f64(<1 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i32_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}

declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
define <1 x double> @vuitofp_v1i32_v1f64(<1 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i32_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}

declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i32(<2 x i32>, metadata, metadata)
define <2 x half> @vsitofp_v2i32_v2f16(<2 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i32_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i32(<2 x i32>, metadata, metadata)
define <2 x half> @vuitofp_v2i32_v2f16(<2 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i32_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
define <2 x float> @vsitofp_v2i32_v2f32(<2 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i32_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
define <2 x float> @vuitofp_v2i32_v2f32(<2 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i32_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
define <2 x double> @vsitofp_v2i32_v2f64(<2 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i32_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}

declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
define <2 x double> @vuitofp_v2i32_v2f64(<2 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i32_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}

declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i32(<4 x i32>, metadata, metadata)
define <4 x half> @vsitofp_v4i32_v4f16(<4 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i32_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i32(<4 x i32>, metadata, metadata)
define <4 x half> @vuitofp_v4i32_v4f16(<4 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i32_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
define <4 x float> @vsitofp_v4i32_v4f32(<4 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i32_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
define <4 x float> @vuitofp_v4i32_v4f32(<4 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i32_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
define <4 x double> @vsitofp_v4i32_v4f64(<4 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i32_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}

declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
define <4 x double> @vuitofp_v4i32_v4f64(<4 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i32_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}

declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i32(<8 x i32>, metadata, metadata)
define <8 x half> @vsitofp_v8i32_v8f16(<8 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i32_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32>, metadata, metadata)
define <8 x half> @vuitofp_v8i32_v8f16(<8 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i32_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i32(<8 x i32>, metadata, metadata)
define <8 x float> @vsitofp_v8i32_v8f32(<8 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i32_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i32(<8 x i32>, metadata, metadata)
define <8 x float> @vuitofp_v8i32_v8f32(<8 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i32_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(<8 x i32>, metadata, metadata)
define <8 x double> @vsitofp_v8i32_v8f64(<8 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i32_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}

declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i32(<8 x i32>, metadata, metadata)
define <8 x double> @vuitofp_v8i32_v8f64(<8 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i32_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}

declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i32(<16 x i32>, metadata, metadata)
define <16 x half> @vsitofp_v16i32_v16f16(<16 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i32_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i32(<16 x i32>, metadata, metadata)
define <16 x half> @vuitofp_v16i32_v16f16(<16 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i32_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(<16 x i32>, metadata, metadata)
define <16 x float> @vsitofp_v16i32_v16f32(<16 x i32> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i32_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}

declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i32(<16 x i32>, metadata, metadata)
define <16 x float> @vuitofp_v16i32_v16f32(<16 x i32> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i32_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}

1473 declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i64(<1 x i64>, metadata, metadata)
1474 define <1 x half> @vsitofp_v1i64_v1f16(<1 x i64> %va) strictfp {
1475 ; CHECK-LABEL: vsitofp_v1i64_v1f16:
1477 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1478 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1479 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
1480 ; CHECK-NEXT: vfncvt.f.f.w v8, v9
1482 %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1483 ret <1 x half> %evec
1486 declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i64(<1 x i64>, metadata, metadata)
1487 define <1 x half> @vuitofp_v1i64_v1f16(<1 x i64> %va) strictfp {
1488 ; CHECK-LABEL: vuitofp_v1i64_v1f16:
1490 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1491 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1492 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
1493 ; CHECK-NEXT: vfncvt.f.f.w v8, v9
1495 %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1496 ret <1 x half> %evec
; Strict-FP signed i64 -> f32: one narrowing step (vfncvt.f.x.w) plus a register
; move to place the result in v8.
1499 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i64(<1 x i64>, metadata, metadata)
1500 define <1 x float> @vsitofp_v1i64_v1f32(<1 x i64> %va) strictfp {
1501 ; CHECK-LABEL: vsitofp_v1i64_v1f32:
1503 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1504 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1505 ; CHECK-NEXT: vmv1r.v v8, v9
1507 %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1508 ret <1 x float> %evec
; Strict-FP unsigned i64 -> f32: single narrowing step via vfncvt.f.xu.w.
1511 declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i64(<1 x i64>, metadata, metadata)
1512 define <1 x float> @vuitofp_v1i64_v1f32(<1 x i64> %va) strictfp {
1513 ; CHECK-LABEL: vuitofp_v1i64_v1f32:
1515 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1516 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1517 ; CHECK-NEXT: vmv1r.v v8, v9
1519 %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1520 ret <1 x float> %evec
; Strict-FP signed i64 -> f64: same element width, single vfcvt.f.x.v in place.
1523 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
1524 define <1 x double> @vsitofp_v1i64_v1f64(<1 x i64> %va) strictfp {
1525 ; CHECK-LABEL: vsitofp_v1i64_v1f64:
1527 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
1528 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1530 %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1531 ret <1 x double> %evec
; Strict-FP unsigned i64 -> f64: same element width, single vfcvt.f.xu.v in place.
1534 declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
1535 define <1 x double> @vuitofp_v1i64_v1f64(<1 x i64> %va) strictfp {
1536 ; CHECK-LABEL: vuitofp_v1i64_v1f64:
1538 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
1539 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1541 %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1542 ret <1 x double> %evec
; Strict-FP signed i64 -> f16, 2 elements: two-step narrowing (vfncvt.f.x.w then vfncvt.f.f.w).
1546 declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i64(<2 x i64>, metadata, metadata)
1547 define <2 x half> @vsitofp_v2i64_v2f16(<2 x i64> %va) strictfp {
1548 ; CHECK-LABEL: vsitofp_v2i64_v2f16:
1550 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1551 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1552 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
1553 ; CHECK-NEXT: vfncvt.f.f.w v8, v9
1555 %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1556 ret <2 x half> %evec
; Strict-FP unsigned i64 -> f16, 2 elements: two-step narrowing (vfncvt.f.xu.w then vfncvt.f.f.w).
1559 declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i64(<2 x i64>, metadata, metadata)
1560 define <2 x half> @vuitofp_v2i64_v2f16(<2 x i64> %va) strictfp {
1561 ; CHECK-LABEL: vuitofp_v2i64_v2f16:
1563 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1564 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1565 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
1566 ; CHECK-NEXT: vfncvt.f.f.w v8, v9
1568 %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1569 ret <2 x half> %evec
; Strict-FP signed i64 -> f32, 2 elements: one narrowing step via vfncvt.f.x.w.
1572 declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
1573 define <2 x float> @vsitofp_v2i64_v2f32(<2 x i64> %va) strictfp {
1574 ; CHECK-LABEL: vsitofp_v2i64_v2f32:
1576 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1577 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1578 ; CHECK-NEXT: vmv1r.v v8, v9
1580 %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1581 ret <2 x float> %evec
; Strict-FP unsigned i64 -> f32, 2 elements: one narrowing step via vfncvt.f.xu.w.
1584 declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
1585 define <2 x float> @vuitofp_v2i64_v2f32(<2 x i64> %va) strictfp {
1586 ; CHECK-LABEL: vuitofp_v2i64_v2f32:
1588 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1589 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1590 ; CHECK-NEXT: vmv1r.v v8, v9
1592 %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1593 ret <2 x float> %evec
; Strict-FP signed i64 -> f64, 2 elements: same width, single vfcvt.f.x.v in place.
1596 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
1597 define <2 x double> @vsitofp_v2i64_v2f64(<2 x i64> %va) strictfp {
1598 ; CHECK-LABEL: vsitofp_v2i64_v2f64:
1600 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
1601 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1603 %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1604 ret <2 x double> %evec
; Strict-FP unsigned i64 -> f64, 2 elements: same width, single vfcvt.f.xu.v in place.
1607 declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
1608 define <2 x double> @vuitofp_v2i64_v2f64(<2 x i64> %va) strictfp {
1609 ; CHECK-LABEL: vuitofp_v2i64_v2f64:
1611 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
1612 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1614 %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1615 ret <2 x double> %evec
; Strict-FP signed i64 -> f16, 4 elements: two-step narrowing; note LMUL grows
; (m1/mf2 register groups, temp in v10) relative to the 1- and 2-element cases.
1618 declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i64(<4 x i64>, metadata, metadata)
1619 define <4 x half> @vsitofp_v4i64_v4f16(<4 x i64> %va) strictfp {
1620 ; CHECK-LABEL: vsitofp_v4i64_v4f16:
1622 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1623 ; CHECK-NEXT: vfncvt.f.x.w v10, v8
1624 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
1625 ; CHECK-NEXT: vfncvt.f.f.w v8, v10
1627 %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1628 ret <4 x half> %evec
; Strict-FP unsigned i64 -> f16, 4 elements: two-step narrowing (vfncvt.f.xu.w then vfncvt.f.f.w).
1631 declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i64(<4 x i64>, metadata, metadata)
1632 define <4 x half> @vuitofp_v4i64_v4f16(<4 x i64> %va) strictfp {
1633 ; CHECK-LABEL: vuitofp_v4i64_v4f16:
1635 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1636 ; CHECK-NEXT: vfncvt.f.xu.w v10, v8
1637 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
1638 ; CHECK-NEXT: vfncvt.f.f.w v8, v10
1640 %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1641 ret <4 x half> %evec
; Strict-FP signed i64 -> f32, 4 elements: one narrowing step; result copied with
; vmv.v.v (whole-group move differs from the vmv1r.v used at smaller LMUL).
1644 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
1645 define <4 x float> @vsitofp_v4i64_v4f32(<4 x i64> %va) strictfp {
1646 ; CHECK-LABEL: vsitofp_v4i64_v4f32:
1648 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1649 ; CHECK-NEXT: vfncvt.f.x.w v10, v8
1650 ; CHECK-NEXT: vmv.v.v v8, v10
1652 %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1653 ret <4 x float> %evec
; Strict-FP unsigned i64 -> f32, 4 elements: one narrowing step via vfncvt.f.xu.w.
1656 declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
1657 define <4 x float> @vuitofp_v4i64_v4f32(<4 x i64> %va) strictfp {
1658 ; CHECK-LABEL: vuitofp_v4i64_v4f32:
1660 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1661 ; CHECK-NEXT: vfncvt.f.xu.w v10, v8
1662 ; CHECK-NEXT: vmv.v.v v8, v10
1664 %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1665 ret <4 x float> %evec
; Strict-FP signed i64 -> f64, 4 elements: same width, single vfcvt.f.x.v at m2.
1668 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64>, metadata, metadata)
1669 define <4 x double> @vsitofp_v4i64_v4f64(<4 x i64> %va) strictfp {
1670 ; CHECK-LABEL: vsitofp_v4i64_v4f64:
1672 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
1673 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1675 %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1676 ret <4 x double> %evec
; Strict-FP unsigned i64 -> f64, 4 elements: same width, single vfcvt.f.xu.v at m2.
1679 declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i64(<4 x i64>, metadata, metadata)
1680 define <4 x double> @vuitofp_v4i64_v4f64(<4 x i64> %va) strictfp {
1681 ; CHECK-LABEL: vuitofp_v4i64_v4f64:
1683 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
1684 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1686 %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1687 ret <4 x double> %evec
; Strict-FP signed i64 -> f16, 8 elements: two-step narrowing at m2/m1, temp in v12.
1690 declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i64(<8 x i64>, metadata, metadata)
1691 define <8 x half> @vsitofp_v8i64_v8f16(<8 x i64> %va) strictfp {
1692 ; CHECK-LABEL: vsitofp_v8i64_v8f16:
1694 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1695 ; CHECK-NEXT: vfncvt.f.x.w v12, v8
1696 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
1697 ; CHECK-NEXT: vfncvt.f.f.w v8, v12
1699 %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1700 ret <8 x half> %evec
; Strict-FP unsigned i64 -> f16, 8 elements: two-step narrowing (vfncvt.f.xu.w then vfncvt.f.f.w).
1703 declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i64(<8 x i64>, metadata, metadata)
1704 define <8 x half> @vuitofp_v8i64_v8f16(<8 x i64> %va) strictfp {
1705 ; CHECK-LABEL: vuitofp_v8i64_v8f16:
1707 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1708 ; CHECK-NEXT: vfncvt.f.xu.w v12, v8
1709 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
1710 ; CHECK-NEXT: vfncvt.f.f.w v8, v12
1712 %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1713 ret <8 x half> %evec
; Strict-FP signed i64 -> f32, 8 elements: one narrowing step via vfncvt.f.x.w at m2.
1716 declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64>, metadata, metadata)
1717 define <8 x float> @vsitofp_v8i64_v8f32(<8 x i64> %va) strictfp {
1718 ; CHECK-LABEL: vsitofp_v8i64_v8f32:
1720 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1721 ; CHECK-NEXT: vfncvt.f.x.w v12, v8
1722 ; CHECK-NEXT: vmv.v.v v8, v12
1724 %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1725 ret <8 x float> %evec
; Strict-FP unsigned i64 -> f32, 8 elements: one narrowing step via vfncvt.f.xu.w at m2.
1728 declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64>, metadata, metadata)
1729 define <8 x float> @vuitofp_v8i64_v8f32(<8 x i64> %va) strictfp {
1730 ; CHECK-LABEL: vuitofp_v8i64_v8f32:
1732 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1733 ; CHECK-NEXT: vfncvt.f.xu.w v12, v8
1734 ; CHECK-NEXT: vmv.v.v v8, v12
1736 %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1737 ret <8 x float> %evec
; Strict-FP signed i64 -> f64, 8 elements: same width, single vfcvt.f.x.v at m4.
1740 declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64>, metadata, metadata)
1741 define <8 x double> @vsitofp_v8i64_v8f64(<8 x i64> %va) strictfp {
1742 ; CHECK-LABEL: vsitofp_v8i64_v8f64:
1744 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
1745 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1747 %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1748 ret <8 x double> %evec
; Strict-FP unsigned i64 -> f64, 8 elements: same width, single vfcvt.f.xu.v at m4.
1751 declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64>, metadata, metadata)
1752 define <8 x double> @vuitofp_v8i64_v8f64(<8 x i64> %va) strictfp {
1753 ; CHECK-LABEL: vuitofp_v8i64_v8f64:
1755 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
1756 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1758 %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1759 ret <8 x double> %evec