; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
; Strict-fp conversions from <N x i1> masks: the mask is materialized as
; 0/-1 (signed) or 0/1 (unsigned) with vmerge, then widened with vfwcvt.
; Restored here: missing "# %bb.0:" / "ret" CHECK lines, IR "ret"
; statements, closing braces, and inter-function blank lines that were
; dropped by a line-numbering extraction pass.

declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i1(<1 x i1>, metadata, metadata)
define <1 x half> @vsitofp_v1i1_v1f16(<1 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i1_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i1(<1 x i1>, metadata, metadata)
define <1 x half> @vuitofp_v1i1_v1f16(<1 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i1_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i1(<1 x i1>, metadata, metadata)
define <1 x float> @vsitofp_v1i1_v1f32(<1 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i1_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i1(<1 x i1>, metadata, metadata)
define <1 x float> @vuitofp_v1i1_v1f32(<1 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i1_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i1(<1 x i1>, metadata, metadata)
define <1 x double> @vsitofp_v1i1_v1f64(<1 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i1_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}

declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i1(<1 x i1>, metadata, metadata)
define <1 x double> @vuitofp_v1i1_v1f64(<1 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i1_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}

declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i1(<2 x i1>, metadata, metadata)
define <2 x half> @vsitofp_v2i1_v2f16(<2 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i1_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i1(<2 x i1>, metadata, metadata)
define <2 x half> @vuitofp_v2i1_v2f16(<2 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i1_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i1(<2 x i1>, metadata, metadata)
define <2 x float> @vsitofp_v2i1_v2f32(<2 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i1_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i1(<2 x i1>, metadata, metadata)
define <2 x float> @vuitofp_v2i1_v2f32(<2 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i1_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i1(<2 x i1>, metadata, metadata)
define <2 x double> @vsitofp_v2i1_v2f64(<2 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i1_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}

declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i1(<2 x i1>, metadata, metadata)
define <2 x double> @vuitofp_v2i1_v2f64(<2 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i1_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}

declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i1(<4 x i1>, metadata, metadata)
define <4 x half> @vsitofp_v4i1_v4f16(<4 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i1_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i1(<4 x i1>, metadata, metadata)
define <4 x half> @vuitofp_v4i1_v4f16(<4 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i1_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
define <4 x float> @vsitofp_v4i1_v4f32(<4 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i1_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
define <4 x float> @vuitofp_v4i1_v4f32(<4 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i1_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i1(<4 x i1>, metadata, metadata)
define <4 x double> @vsitofp_v4i1_v4f64(<4 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i1_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}

declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i1(<4 x i1>, metadata, metadata)
define <4 x double> @vuitofp_v4i1_v4f64(<4 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i1_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}

declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i1(<8 x i1>, metadata, metadata)
define <8 x half> @vsitofp_v8i1_v8f16(<8 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i1_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i1(<8 x i1>, metadata, metadata)
define <8 x half> @vuitofp_v8i1_v8f16(<8 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i1_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i1(<8 x i1>, metadata, metadata)
define <8 x float> @vsitofp_v8i1_v8f32(<8 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i1_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i1(<8 x i1>, metadata, metadata)
define <8 x float> @vuitofp_v8i1_v8f32(<8 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i1_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i1(<8 x i1>, metadata, metadata)
define <8 x double> @vsitofp_v8i1_v8f64(<8 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i1_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}

declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i1(<8 x i1>, metadata, metadata)
define <8 x double> @vuitofp_v8i1_v8f64(<8 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i1_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}

declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i1(<16 x i1>, metadata, metadata)
define <16 x half> @vsitofp_v16i1_v16f16(<16 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i1_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1>, metadata, metadata)
define <16 x half> @vuitofp_v16i1_v16f16(<16 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i1_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i1(<16 x i1>, metadata, metadata)
define <16 x float> @vsitofp_v16i1_v16f32(<16 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i1_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}

declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i1(<16 x i1>, metadata, metadata)
define <16 x float> @vuitofp_v16i1_v16f32(<16 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i1_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}

declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i1(<32 x i1>, metadata, metadata)
define <32 x half> @vsitofp_v32i1_v32f16(<32 x i1> %va) strictfp {
; CHECK-LABEL: vsitofp_v32i1_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i1(<32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <32 x half> %evec
}

declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i1(<32 x i1>, metadata, metadata)
define <32 x half> @vuitofp_v32i1_v32f16(<32 x i1> %va) strictfp {
; CHECK-LABEL: vuitofp_v32i1_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i1(<32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <32 x half> %evec
}
; Strict-fp conversions from <N x i7>/<N x i8>: same-width-ratio i8->f16
; uses vfwcvt directly; i8->f32/f64 first sign/zero-extends with
; vsext/vzext. The odd-width i7 case is scalarized through the FPR and a
; stack slot, so RV32/RV64 differ for the signed variant. Restored here:
; missing "# %bb.0:" / "ret" CHECK lines, the ";" separator between the
; RV32 and RV64 bodies, IR "ret" statements, closing braces, and blank
; lines dropped by a line-numbering extraction pass.

declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i8(<1 x i8>, metadata, metadata)
define <1 x half> @vsitofp_v1i8_v1f16(<1 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i8_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i7(<1 x i7>, metadata, metadata)
define <1 x half> @vsitofp_v1i7_v1f16(<1 x i7> %va) strictfp {
; RV32-LABEL: vsitofp_v1i7_v1f16:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    slli a0, a0, 25
; RV32-NEXT:    srai a0, a0, 25
; RV32-NEXT:    fcvt.h.w fa5, a0
; RV32-NEXT:    fsh fa5, 14(sp)
; RV32-NEXT:    addi a0, sp, 14
; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; RV32-NEXT:    vle16.v v8, (a0)
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vsitofp_v1i7_v1f16:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    slli a0, a0, 57
; RV64-NEXT:    srai a0, a0, 57
; RV64-NEXT:    fcvt.h.w fa5, a0
; RV64-NEXT:    fsh fa5, 14(sp)
; RV64-NEXT:    addi a0, sp, 14
; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; RV64-NEXT:    vle16.v v8, (a0)
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i7(<1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i7(<1 x i7>, metadata, metadata)
define <1 x half> @vuitofp_v1i7_v1f16(<1 x i7> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i7_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    andi a0, a0, 127
; CHECK-NEXT:    fcvt.h.wu fa5, a0
; CHECK-NEXT:    fsh fa5, 14(sp)
; CHECK-NEXT:    addi a0, sp, 14
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i7(<1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i8(<1 x i8>, metadata, metadata)
define <1 x half> @vuitofp_v1i8_v1f16(<1 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i8_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x half> %evec
}

declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i8(<1 x i8>, metadata, metadata)
define <1 x float> @vsitofp_v1i8_v1f32(<1 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i8_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i8(<1 x i8>, metadata, metadata)
define <1 x float> @vuitofp_v1i8_v1f32(<1 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i8_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x float> %evec
}

declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i8(<1 x i8>, metadata, metadata)
define <1 x double> @vsitofp_v1i8_v1f64(<1 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v1i8_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}

declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i8(<1 x i8>, metadata, metadata)
define <1 x double> @vuitofp_v1i8_v1f64(<1 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v1i8_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <1 x double> %evec
}

declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i8(<2 x i8>, metadata, metadata)
define <2 x half> @vsitofp_v2i8_v2f16(<2 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i8_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i8(<2 x i8>, metadata, metadata)
define <2 x half> @vuitofp_v2i8_v2f16(<2 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i8_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}

declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i8(<2 x i8>, metadata, metadata)
define <2 x float> @vsitofp_v2i8_v2f32(<2 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i8_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i8(<2 x i8>, metadata, metadata)
define <2 x float> @vuitofp_v2i8_v2f32(<2 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i8_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}

declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i8(<2 x i8>, metadata, metadata)
define <2 x double> @vsitofp_v2i8_v2f64(<2 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v2i8_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}

declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i8(<2 x i8>, metadata, metadata)
define <2 x double> @vuitofp_v2i8_v2f64(<2 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v2i8_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf4 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %evec
}

declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i8(<4 x i8>, metadata, metadata)
define <4 x half> @vsitofp_v4i8_v4f16(<4 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i8_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i8(<4 x i8>, metadata, metadata)
define <4 x half> @vuitofp_v4i8_v4f16(<4 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i8_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}

declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8>, metadata, metadata)
define <4 x float> @vsitofp_v4i8_v4f32(<4 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i8_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i8(<4 x i8>, metadata, metadata)
define <4 x float> @vuitofp_v4i8_v4f32(<4 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i8_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}

declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i8(<4 x i8>, metadata, metadata)
define <4 x double> @vsitofp_v4i8_v4f64(<4 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v4i8_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vsext.vf4 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}

declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i8(<4 x i8>, metadata, metadata)
define <4 x double> @vuitofp_v4i8_v4f64(<4 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v4i8_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vzext.vf4 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x double> %evec
}

declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i8(<8 x i8>, metadata, metadata)
define <8 x half> @vsitofp_v8i8_v8f16(<8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i8_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i8(<8 x i8>, metadata, metadata)
define <8 x half> @vuitofp_v8i8_v8f16(<8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i8_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}

declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i8(<8 x i8>, metadata, metadata)
define <8 x float> @vsitofp_v8i8_v8f32(<8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i8_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vsext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i8(<8 x i8>, metadata, metadata)
define <8 x float> @vuitofp_v8i8_v8f32(<8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i8_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}

declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i8(<8 x i8>, metadata, metadata)
define <8 x double> @vsitofp_v8i8_v8f64(<8 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v8i8_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vsext.vf4 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}

declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i8(<8 x i8>, metadata, metadata)
define <8 x double> @vuitofp_v8i8_v8f64(<8 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v8i8_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vzext.vf4 v12, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x double> %evec
}

declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i8(<16 x i8>, metadata, metadata)
define <16 x half> @vsitofp_v16i8_v16f16(<16 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i8_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8>, metadata, metadata)
define <16 x half> @vuitofp_v16i8_v16f16(<16 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i8_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %evec
}

declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i8(<16 x i8>, metadata, metadata)
define <16 x float> @vsitofp_v16i8_v16f32(<16 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v16i8_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}

declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i8(<16 x i8>, metadata, metadata)
define <16 x float> @vuitofp_v16i8_v16f32(<16 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v16i8_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x float> %evec
}

declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i8(<32 x i8>, metadata, metadata)
define <32 x half> @vsitofp_v32i8_v32f16(<32 x i8> %va) strictfp {
; CHECK-LABEL: vsitofp_v32i8_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i8(<32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <32 x half> %evec
}

declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i8(<32 x i8>, metadata, metadata)
define <32 x half> @vuitofp_v32i8_v32f16(<32 x i8> %va) strictfp {
; CHECK-LABEL: vuitofp_v32i8_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i8(<32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <32 x half> %evec
}
812 declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i16(<1 x i16>, metadata, metadata)
813 define <1 x half> @vsitofp_v1i16_v1f16(<1 x i16> %va) strictfp {
814 ; CHECK-LABEL: vsitofp_v1i16_v1f16:
816 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
817 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
819 %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
823 declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i16(<1 x i16>, metadata, metadata)
824 define <1 x half> @vuitofp_v1i16_v1f16(<1 x i16> %va) strictfp {
825 ; CHECK-LABEL: vuitofp_v1i16_v1f16:
827 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
828 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
830 %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
834 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i16(<1 x i16>, metadata, metadata)
835 define <1 x float> @vsitofp_v1i16_v1f32(<1 x i16> %va) strictfp {
836 ; CHECK-LABEL: vsitofp_v1i16_v1f32:
838 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
839 ; CHECK-NEXT: vfwcvt.f.x.v v9, v8
840 ; CHECK-NEXT: vmv1r.v v8, v9
842 %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
843 ret <1 x float> %evec
846 declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i16(<1 x i16>, metadata, metadata)
847 define <1 x float> @vuitofp_v1i16_v1f32(<1 x i16> %va) strictfp {
848 ; CHECK-LABEL: vuitofp_v1i16_v1f32:
850 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
851 ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
852 ; CHECK-NEXT: vmv1r.v v8, v9
854 %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
855 ret <1 x float> %evec
858 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i16(<1 x i16>, metadata, metadata)
859 define <1 x double> @vsitofp_v1i16_v1f64(<1 x i16> %va) strictfp {
860 ; CHECK-LABEL: vsitofp_v1i16_v1f64:
862 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
863 ; CHECK-NEXT: vsext.vf2 v9, v8
864 ; CHECK-NEXT: vfwcvt.f.x.v v8, v9
866 %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
867 ret <1 x double> %evec
870 declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i16(<1 x i16>, metadata, metadata)
871 define <1 x double> @vuitofp_v1i16_v1f64(<1 x i16> %va) strictfp {
872 ; CHECK-LABEL: vuitofp_v1i16_v1f64:
874 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
875 ; CHECK-NEXT: vzext.vf2 v9, v8
876 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
878 %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
879 ret <1 x double> %evec
882 declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i16(<2 x i16>, metadata, metadata)
883 define <2 x half> @vsitofp_v2i16_v2f16(<2 x i16> %va) strictfp {
884 ; CHECK-LABEL: vsitofp_v2i16_v2f16:
886 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
887 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
889 %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
893 declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i16(<2 x i16>, metadata, metadata)
894 define <2 x half> @vuitofp_v2i16_v2f16(<2 x i16> %va) strictfp {
895 ; CHECK-LABEL: vuitofp_v2i16_v2f16:
897 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
898 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
900 %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
904 declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i16(<2 x i16>, metadata, metadata)
905 define <2 x float> @vsitofp_v2i16_v2f32(<2 x i16> %va) strictfp {
906 ; CHECK-LABEL: vsitofp_v2i16_v2f32:
908 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
909 ; CHECK-NEXT: vfwcvt.f.x.v v9, v8
910 ; CHECK-NEXT: vmv1r.v v8, v9
912 %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
913 ret <2 x float> %evec
916 declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i16(<2 x i16>, metadata, metadata)
917 define <2 x float> @vuitofp_v2i16_v2f32(<2 x i16> %va) strictfp {
918 ; CHECK-LABEL: vuitofp_v2i16_v2f32:
920 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
921 ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
922 ; CHECK-NEXT: vmv1r.v v8, v9
924 %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
925 ret <2 x float> %evec
928 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
929 define <2 x double> @vsitofp_v2i16_v2f64(<2 x i16> %va) strictfp {
930 ; CHECK-LABEL: vsitofp_v2i16_v2f64:
932 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
933 ; CHECK-NEXT: vsext.vf2 v9, v8
934 ; CHECK-NEXT: vfwcvt.f.x.v v8, v9
936 %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
937 ret <2 x double> %evec
940 declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
941 define <2 x double> @vuitofp_v2i16_v2f64(<2 x i16> %va) strictfp {
942 ; CHECK-LABEL: vuitofp_v2i16_v2f64:
944 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
945 ; CHECK-NEXT: vzext.vf2 v9, v8
946 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
948 %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
949 ret <2 x double> %evec
952 declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i16(<4 x i16>, metadata, metadata)
953 define <4 x half> @vsitofp_v4i16_v4f16(<4 x i16> %va) strictfp {
954 ; CHECK-LABEL: vsitofp_v4i16_v4f16:
956 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
957 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
959 %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
963 declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i16(<4 x i16>, metadata, metadata)
964 define <4 x half> @vuitofp_v4i16_v4f16(<4 x i16> %va) strictfp {
965 ; CHECK-LABEL: vuitofp_v4i16_v4f16:
967 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
968 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
970 %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
974 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
975 define <4 x float> @vsitofp_v4i16_v4f32(<4 x i16> %va) strictfp {
976 ; CHECK-LABEL: vsitofp_v4i16_v4f32:
978 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
979 ; CHECK-NEXT: vfwcvt.f.x.v v9, v8
980 ; CHECK-NEXT: vmv1r.v v8, v9
982 %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
983 ret <4 x float> %evec
986 declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
987 define <4 x float> @vuitofp_v4i16_v4f32(<4 x i16> %va) strictfp {
988 ; CHECK-LABEL: vuitofp_v4i16_v4f32:
990 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
991 ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
992 ; CHECK-NEXT: vmv1r.v v8, v9
994 %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
995 ret <4 x float> %evec
998 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i16(<4 x i16>, metadata, metadata)
999 define <4 x double> @vsitofp_v4i16_v4f64(<4 x i16> %va) strictfp {
1000 ; CHECK-LABEL: vsitofp_v4i16_v4f64:
1002 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1003 ; CHECK-NEXT: vsext.vf2 v10, v8
1004 ; CHECK-NEXT: vfwcvt.f.x.v v8, v10
1006 %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1007 ret <4 x double> %evec
1010 declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i16(<4 x i16>, metadata, metadata)
1011 define <4 x double> @vuitofp_v4i16_v4f64(<4 x i16> %va) strictfp {
1012 ; CHECK-LABEL: vuitofp_v4i16_v4f64:
1014 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1015 ; CHECK-NEXT: vzext.vf2 v10, v8
1016 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
1018 %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1019 ret <4 x double> %evec
1022 declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i16(<8 x i16>, metadata, metadata)
1023 define <8 x half> @vsitofp_v8i16_v8f16(<8 x i16> %va) strictfp {
1024 ; CHECK-LABEL: vsitofp_v8i16_v8f16:
1026 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
1027 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1029 %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1030 ret <8 x half> %evec
1033 declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i16(<8 x i16>, metadata, metadata)
1034 define <8 x half> @vuitofp_v8i16_v8f16(<8 x i16> %va) strictfp {
1035 ; CHECK-LABEL: vuitofp_v8i16_v8f16:
1037 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
1038 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1040 %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1041 ret <8 x half> %evec
1044 declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i16(<8 x i16>, metadata, metadata)
1045 define <8 x float> @vsitofp_v8i16_v8f32(<8 x i16> %va) strictfp {
1046 ; CHECK-LABEL: vsitofp_v8i16_v8f32:
1048 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
1049 ; CHECK-NEXT: vfwcvt.f.x.v v10, v8
1050 ; CHECK-NEXT: vmv2r.v v8, v10
1052 %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1053 ret <8 x float> %evec
1056 declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i16(<8 x i16>, metadata, metadata)
1057 define <8 x float> @vuitofp_v8i16_v8f32(<8 x i16> %va) strictfp {
1058 ; CHECK-LABEL: vuitofp_v8i16_v8f32:
1060 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
1061 ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8
1062 ; CHECK-NEXT: vmv2r.v v8, v10
1064 %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1065 ret <8 x float> %evec
1068 declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i16(<8 x i16>, metadata, metadata)
1069 define <8 x double> @vsitofp_v8i16_v8f64(<8 x i16> %va) strictfp {
1070 ; CHECK-LABEL: vsitofp_v8i16_v8f64:
1072 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1073 ; CHECK-NEXT: vsext.vf2 v12, v8
1074 ; CHECK-NEXT: vfwcvt.f.x.v v8, v12
1076 %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1077 ret <8 x double> %evec
1080 declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i16(<8 x i16>, metadata, metadata)
1081 define <8 x double> @vuitofp_v8i16_v8f64(<8 x i16> %va) strictfp {
1082 ; CHECK-LABEL: vuitofp_v8i16_v8f64:
1084 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1085 ; CHECK-NEXT: vzext.vf2 v12, v8
1086 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
1088 %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1089 ret <8 x double> %evec
1092 declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i16(<16 x i16>, metadata, metadata)
1093 define <16 x half> @vsitofp_v16i16_v16f16(<16 x i16> %va) strictfp {
1094 ; CHECK-LABEL: vsitofp_v16i16_v16f16:
1096 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
1097 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1099 %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1100 ret <16 x half> %evec
1103 declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i16(<16 x i16>, metadata, metadata)
1104 define <16 x half> @vuitofp_v16i16_v16f16(<16 x i16> %va) strictfp {
1105 ; CHECK-LABEL: vuitofp_v16i16_v16f16:
1107 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
1108 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1110 %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1111 ret <16 x half> %evec
1114 declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i16(<16 x i16>, metadata, metadata)
1115 define <16 x float> @vsitofp_v16i16_v16f32(<16 x i16> %va) strictfp {
1116 ; CHECK-LABEL: vsitofp_v16i16_v16f32:
1118 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
1119 ; CHECK-NEXT: vfwcvt.f.x.v v12, v8
1120 ; CHECK-NEXT: vmv4r.v v8, v12
1122 %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1123 ret <16 x float> %evec
1126 declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i16(<16 x i16>, metadata, metadata)
1127 define <16 x float> @vuitofp_v16i16_v16f32(<16 x i16> %va) strictfp {
1128 ; CHECK-LABEL: vuitofp_v16i16_v16f32:
1130 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
1131 ; CHECK-NEXT: vfwcvt.f.xu.v v12, v8
1132 ; CHECK-NEXT: vmv4r.v v8, v12
1134 %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1135 ret <16 x float> %evec
1138 declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i16(<32 x i16>, metadata, metadata)
1139 define <32 x half> @vsitofp_v32i16_v32f16(<32 x i16> %va) strictfp {
1140 ; CHECK-LABEL: vsitofp_v32i16_v32f16:
1142 ; CHECK-NEXT: li a0, 32
1143 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
1144 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1146 %evec = call <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i16(<32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1147 ret <32 x half> %evec
1150 declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i16(<32 x i16>, metadata, metadata)
1151 define <32 x half> @vuitofp_v32i16_v32f16(<32 x i16> %va) strictfp {
1152 ; CHECK-LABEL: vuitofp_v32i16_v32f16:
1154 ; CHECK-NEXT: li a0, 32
1155 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
1156 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1158 %evec = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i16(<32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1159 ret <32 x half> %evec
1162 declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i32(<1 x i32>, metadata, metadata)
1163 define <1 x half> @vsitofp_v1i32_v1f16(<1 x i32> %va) strictfp {
1164 ; CHECK-LABEL: vsitofp_v1i32_v1f16:
1166 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
1167 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1168 ; CHECK-NEXT: vmv1r.v v8, v9
1170 %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1171 ret <1 x half> %evec
1174 declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i32(<1 x i32>, metadata, metadata)
1175 define <1 x half> @vuitofp_v1i32_v1f16(<1 x i32> %va) strictfp {
1176 ; CHECK-LABEL: vuitofp_v1i32_v1f16:
1178 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
1179 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1180 ; CHECK-NEXT: vmv1r.v v8, v9
1182 %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1183 ret <1 x half> %evec
1186 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
1187 define <1 x float> @vsitofp_v1i32_v1f32(<1 x i32> %va) strictfp {
1188 ; CHECK-LABEL: vsitofp_v1i32_v1f32:
1190 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1191 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1193 %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1194 ret <1 x float> %evec
1197 declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
1198 define <1 x float> @vuitofp_v1i32_v1f32(<1 x i32> %va) strictfp {
1199 ; CHECK-LABEL: vuitofp_v1i32_v1f32:
1201 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1202 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1204 %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1205 ret <1 x float> %evec
1208 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
1209 define <1 x double> @vsitofp_v1i32_v1f64(<1 x i32> %va) strictfp {
1210 ; CHECK-LABEL: vsitofp_v1i32_v1f64:
1212 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1213 ; CHECK-NEXT: vfwcvt.f.x.v v9, v8
1214 ; CHECK-NEXT: vmv1r.v v8, v9
1216 %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1217 ret <1 x double> %evec
1220 declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
1221 define <1 x double> @vuitofp_v1i32_v1f64(<1 x i32> %va) strictfp {
1222 ; CHECK-LABEL: vuitofp_v1i32_v1f64:
1224 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1225 ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
1226 ; CHECK-NEXT: vmv1r.v v8, v9
1228 %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1229 ret <1 x double> %evec
1232 declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i32(<2 x i32>, metadata, metadata)
1233 define <2 x half> @vsitofp_v2i32_v2f16(<2 x i32> %va) strictfp {
1234 ; CHECK-LABEL: vsitofp_v2i32_v2f16:
1236 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
1237 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1238 ; CHECK-NEXT: vmv1r.v v8, v9
1240 %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1241 ret <2 x half> %evec
1244 declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i32(<2 x i32>, metadata, metadata)
1245 define <2 x half> @vuitofp_v2i32_v2f16(<2 x i32> %va) strictfp {
1246 ; CHECK-LABEL: vuitofp_v2i32_v2f16:
1248 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
1249 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1250 ; CHECK-NEXT: vmv1r.v v8, v9
1252 %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1253 ret <2 x half> %evec
1256 declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
1257 define <2 x float> @vsitofp_v2i32_v2f32(<2 x i32> %va) strictfp {
1258 ; CHECK-LABEL: vsitofp_v2i32_v2f32:
1260 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1261 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1263 %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1264 ret <2 x float> %evec
1267 declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
1268 define <2 x float> @vuitofp_v2i32_v2f32(<2 x i32> %va) strictfp {
1269 ; CHECK-LABEL: vuitofp_v2i32_v2f32:
1271 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1272 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1274 %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1275 ret <2 x float> %evec
1278 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
1279 define <2 x double> @vsitofp_v2i32_v2f64(<2 x i32> %va) strictfp {
1280 ; CHECK-LABEL: vsitofp_v2i32_v2f64:
1282 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1283 ; CHECK-NEXT: vfwcvt.f.x.v v9, v8
1284 ; CHECK-NEXT: vmv1r.v v8, v9
1286 %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1287 ret <2 x double> %evec
1290 declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
1291 define <2 x double> @vuitofp_v2i32_v2f64(<2 x i32> %va) strictfp {
1292 ; CHECK-LABEL: vuitofp_v2i32_v2f64:
1294 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1295 ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
1296 ; CHECK-NEXT: vmv1r.v v8, v9
1298 %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1299 ret <2 x double> %evec
1302 declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i32(<4 x i32>, metadata, metadata)
1303 define <4 x half> @vsitofp_v4i32_v4f16(<4 x i32> %va) strictfp {
1304 ; CHECK-LABEL: vsitofp_v4i32_v4f16:
1306 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
1307 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1308 ; CHECK-NEXT: vmv1r.v v8, v9
1310 %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1311 ret <4 x half> %evec
1314 declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i32(<4 x i32>, metadata, metadata)
1315 define <4 x half> @vuitofp_v4i32_v4f16(<4 x i32> %va) strictfp {
1316 ; CHECK-LABEL: vuitofp_v4i32_v4f16:
1318 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
1319 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1320 ; CHECK-NEXT: vmv1r.v v8, v9
1322 %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1323 ret <4 x half> %evec
1326 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
1327 define <4 x float> @vsitofp_v4i32_v4f32(<4 x i32> %va) strictfp {
1328 ; CHECK-LABEL: vsitofp_v4i32_v4f32:
1330 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1331 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1333 %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1334 ret <4 x float> %evec
1337 declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
1338 define <4 x float> @vuitofp_v4i32_v4f32(<4 x i32> %va) strictfp {
1339 ; CHECK-LABEL: vuitofp_v4i32_v4f32:
1341 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1342 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1344 %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1345 ret <4 x float> %evec
1348 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
1349 define <4 x double> @vsitofp_v4i32_v4f64(<4 x i32> %va) strictfp {
1350 ; CHECK-LABEL: vsitofp_v4i32_v4f64:
1352 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1353 ; CHECK-NEXT: vfwcvt.f.x.v v10, v8
1354 ; CHECK-NEXT: vmv2r.v v8, v10
1356 %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1357 ret <4 x double> %evec
1360 declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
1361 define <4 x double> @vuitofp_v4i32_v4f64(<4 x i32> %va) strictfp {
1362 ; CHECK-LABEL: vuitofp_v4i32_v4f64:
1364 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1365 ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8
1366 ; CHECK-NEXT: vmv2r.v v8, v10
1368 %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1369 ret <4 x double> %evec
1372 declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i32(<8 x i32>, metadata, metadata)
1373 define <8 x half> @vsitofp_v8i32_v8f16(<8 x i32> %va) strictfp {
1374 ; CHECK-LABEL: vsitofp_v8i32_v8f16:
1376 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
1377 ; CHECK-NEXT: vfncvt.f.x.w v10, v8
1378 ; CHECK-NEXT: vmv.v.v v8, v10
1380 %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1381 ret <8 x half> %evec
1384 declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32>, metadata, metadata)
1385 define <8 x half> @vuitofp_v8i32_v8f16(<8 x i32> %va) strictfp {
1386 ; CHECK-LABEL: vuitofp_v8i32_v8f16:
1388 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
1389 ; CHECK-NEXT: vfncvt.f.xu.w v10, v8
1390 ; CHECK-NEXT: vmv.v.v v8, v10
1392 %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1393 ret <8 x half> %evec
1396 declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i32(<8 x i32>, metadata, metadata)
1397 define <8 x float> @vsitofp_v8i32_v8f32(<8 x i32> %va) strictfp {
1398 ; CHECK-LABEL: vsitofp_v8i32_v8f32:
1400 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1401 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1403 %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1404 ret <8 x float> %evec
1407 declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i32(<8 x i32>, metadata, metadata)
1408 define <8 x float> @vuitofp_v8i32_v8f32(<8 x i32> %va) strictfp {
1409 ; CHECK-LABEL: vuitofp_v8i32_v8f32:
1411 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1412 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1414 %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1415 ret <8 x float> %evec
1418 declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(<8 x i32>, metadata, metadata)
1419 define <8 x double> @vsitofp_v8i32_v8f64(<8 x i32> %va) strictfp {
1420 ; CHECK-LABEL: vsitofp_v8i32_v8f64:
1422 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1423 ; CHECK-NEXT: vfwcvt.f.x.v v12, v8
1424 ; CHECK-NEXT: vmv4r.v v8, v12
1426 %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1427 ret <8 x double> %evec
1430 declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i32(<8 x i32>, metadata, metadata)
1431 define <8 x double> @vuitofp_v8i32_v8f64(<8 x i32> %va) strictfp {
1432 ; CHECK-LABEL: vuitofp_v8i32_v8f64:
1434 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1435 ; CHECK-NEXT: vfwcvt.f.xu.v v12, v8
1436 ; CHECK-NEXT: vmv4r.v v8, v12
1438 %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1439 ret <8 x double> %evec
1442 declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i32(<16 x i32>, metadata, metadata)
1443 define <16 x half> @vsitofp_v16i32_v16f16(<16 x i32> %va) strictfp {
1444 ; CHECK-LABEL: vsitofp_v16i32_v16f16:
1446 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
1447 ; CHECK-NEXT: vfncvt.f.x.w v12, v8
1448 ; CHECK-NEXT: vmv.v.v v8, v12
1450 %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1451 ret <16 x half> %evec
1454 declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i32(<16 x i32>, metadata, metadata)
1455 define <16 x half> @vuitofp_v16i32_v16f16(<16 x i32> %va) strictfp {
1456 ; CHECK-LABEL: vuitofp_v16i32_v16f16:
1458 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
1459 ; CHECK-NEXT: vfncvt.f.xu.w v12, v8
1460 ; CHECK-NEXT: vmv.v.v v8, v12
1462 %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1463 ret <16 x half> %evec
1466 declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(<16 x i32>, metadata, metadata)
1467 define <16 x float> @vsitofp_v16i32_v16f32(<16 x i32> %va) strictfp {
1468 ; CHECK-LABEL: vsitofp_v16i32_v16f32:
1470 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
1471 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1473 %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1474 ret <16 x float> %evec
1477 declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i32(<16 x i32>, metadata, metadata)
1478 define <16 x float> @vuitofp_v16i32_v16f32(<16 x i32> %va) strictfp {
1479 ; CHECK-LABEL: vuitofp_v16i32_v16f32:
1481 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
1482 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1484 %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1485 ret <16 x float> %evec
1488 declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i64(<1 x i64>, metadata, metadata)
1489 define <1 x half> @vsitofp_v1i64_v1f16(<1 x i64> %va) strictfp {
1490 ; CHECK-LABEL: vsitofp_v1i64_v1f16:
1492 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1493 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1494 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
1495 ; CHECK-NEXT: vfncvt.f.f.w v8, v9
1497 %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1498 ret <1 x half> %evec
1501 declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i64(<1 x i64>, metadata, metadata)
1502 define <1 x half> @vuitofp_v1i64_v1f16(<1 x i64> %va) strictfp {
1503 ; CHECK-LABEL: vuitofp_v1i64_v1f16:
1505 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1506 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1507 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
1508 ; CHECK-NEXT: vfncvt.f.f.w v8, v9
1510 %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1511 ret <1 x half> %evec
1514 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i64(<1 x i64>, metadata, metadata)
1515 define <1 x float> @vsitofp_v1i64_v1f32(<1 x i64> %va) strictfp {
1516 ; CHECK-LABEL: vsitofp_v1i64_v1f32:
1518 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1519 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1520 ; CHECK-NEXT: vmv1r.v v8, v9
1522 %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1523 ret <1 x float> %evec
1526 declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i64(<1 x i64>, metadata, metadata)
1527 define <1 x float> @vuitofp_v1i64_v1f32(<1 x i64> %va) strictfp {
1528 ; CHECK-LABEL: vuitofp_v1i64_v1f32:
1530 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
1531 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1532 ; CHECK-NEXT: vmv1r.v v8, v9
1534 %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1535 ret <1 x float> %evec
1538 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
1539 define <1 x double> @vsitofp_v1i64_v1f64(<1 x i64> %va) strictfp {
1540 ; CHECK-LABEL: vsitofp_v1i64_v1f64:
1542 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
1543 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1545 %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1546 ret <1 x double> %evec
1549 declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
1550 define <1 x double> @vuitofp_v1i64_v1f64(<1 x i64> %va) strictfp {
1551 ; CHECK-LABEL: vuitofp_v1i64_v1f64:
1553 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
1554 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1556 %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1557 ret <1 x double> %evec
1561 declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i64(<2 x i64>, metadata, metadata)
1562 define <2 x half> @vsitofp_v2i64_v2f16(<2 x i64> %va) strictfp {
1563 ; CHECK-LABEL: vsitofp_v2i64_v2f16:
1565 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1566 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1567 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
1568 ; CHECK-NEXT: vfncvt.f.f.w v8, v9
1570 %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1571 ret <2 x half> %evec
1574 declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i64(<2 x i64>, metadata, metadata)
1575 define <2 x half> @vuitofp_v2i64_v2f16(<2 x i64> %va) strictfp {
1576 ; CHECK-LABEL: vuitofp_v2i64_v2f16:
1578 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1579 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1580 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
1581 ; CHECK-NEXT: vfncvt.f.f.w v8, v9
1583 %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1584 ret <2 x half> %evec
1587 declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
1588 define <2 x float> @vsitofp_v2i64_v2f32(<2 x i64> %va) strictfp {
1589 ; CHECK-LABEL: vsitofp_v2i64_v2f32:
1591 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1592 ; CHECK-NEXT: vfncvt.f.x.w v9, v8
1593 ; CHECK-NEXT: vmv1r.v v8, v9
1595 %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1596 ret <2 x float> %evec
1599 declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
1600 define <2 x float> @vuitofp_v2i64_v2f32(<2 x i64> %va) strictfp {
1601 ; CHECK-LABEL: vuitofp_v2i64_v2f32:
1603 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
1604 ; CHECK-NEXT: vfncvt.f.xu.w v9, v8
1605 ; CHECK-NEXT: vmv1r.v v8, v9
1607 %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1608 ret <2 x float> %evec
1611 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
1612 define <2 x double> @vsitofp_v2i64_v2f64(<2 x i64> %va) strictfp {
1613 ; CHECK-LABEL: vsitofp_v2i64_v2f64:
1615 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
1616 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1618 %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1619 ret <2 x double> %evec
1622 declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
1623 define <2 x double> @vuitofp_v2i64_v2f64(<2 x i64> %va) strictfp {
1624 ; CHECK-LABEL: vuitofp_v2i64_v2f64:
1626 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
1627 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1629 %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1630 ret <2 x double> %evec
1633 declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i64(<4 x i64>, metadata, metadata)
1634 define <4 x half> @vsitofp_v4i64_v4f16(<4 x i64> %va) strictfp {
1635 ; CHECK-LABEL: vsitofp_v4i64_v4f16:
1637 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1638 ; CHECK-NEXT: vfncvt.f.x.w v10, v8
1639 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
1640 ; CHECK-NEXT: vfncvt.f.f.w v8, v10
1642 %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1643 ret <4 x half> %evec
1646 declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i64(<4 x i64>, metadata, metadata)
1647 define <4 x half> @vuitofp_v4i64_v4f16(<4 x i64> %va) strictfp {
1648 ; CHECK-LABEL: vuitofp_v4i64_v4f16:
1650 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1651 ; CHECK-NEXT: vfncvt.f.xu.w v10, v8
1652 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
1653 ; CHECK-NEXT: vfncvt.f.f.w v8, v10
1655 %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1656 ret <4 x half> %evec
1659 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
1660 define <4 x float> @vsitofp_v4i64_v4f32(<4 x i64> %va) strictfp {
1661 ; CHECK-LABEL: vsitofp_v4i64_v4f32:
1663 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1664 ; CHECK-NEXT: vfncvt.f.x.w v10, v8
1665 ; CHECK-NEXT: vmv.v.v v8, v10
1667 %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1668 ret <4 x float> %evec
1671 declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
1672 define <4 x float> @vuitofp_v4i64_v4f32(<4 x i64> %va) strictfp {
1673 ; CHECK-LABEL: vuitofp_v4i64_v4f32:
1675 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
1676 ; CHECK-NEXT: vfncvt.f.xu.w v10, v8
1677 ; CHECK-NEXT: vmv.v.v v8, v10
1679 %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1680 ret <4 x float> %evec
1683 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64>, metadata, metadata)
1684 define <4 x double> @vsitofp_v4i64_v4f64(<4 x i64> %va) strictfp {
1685 ; CHECK-LABEL: vsitofp_v4i64_v4f64:
1687 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
1688 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1690 %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1691 ret <4 x double> %evec
1694 declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i64(<4 x i64>, metadata, metadata)
1695 define <4 x double> @vuitofp_v4i64_v4f64(<4 x i64> %va) strictfp {
1696 ; CHECK-LABEL: vuitofp_v4i64_v4f64:
1698 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
1699 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1701 %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1702 ret <4 x double> %evec
1705 declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i64(<8 x i64>, metadata, metadata)
1706 define <8 x half> @vsitofp_v8i64_v8f16(<8 x i64> %va) strictfp {
1707 ; CHECK-LABEL: vsitofp_v8i64_v8f16:
1709 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1710 ; CHECK-NEXT: vfncvt.f.x.w v12, v8
1711 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
1712 ; CHECK-NEXT: vfncvt.f.f.w v8, v12
1714 %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1715 ret <8 x half> %evec
1718 declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i64(<8 x i64>, metadata, metadata)
1719 define <8 x half> @vuitofp_v8i64_v8f16(<8 x i64> %va) strictfp {
1720 ; CHECK-LABEL: vuitofp_v8i64_v8f16:
1722 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1723 ; CHECK-NEXT: vfncvt.f.xu.w v12, v8
1724 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
1725 ; CHECK-NEXT: vfncvt.f.f.w v8, v12
1727 %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1728 ret <8 x half> %evec
1731 declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64>, metadata, metadata)
1732 define <8 x float> @vsitofp_v8i64_v8f32(<8 x i64> %va) strictfp {
1733 ; CHECK-LABEL: vsitofp_v8i64_v8f32:
1735 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1736 ; CHECK-NEXT: vfncvt.f.x.w v12, v8
1737 ; CHECK-NEXT: vmv.v.v v8, v12
1739 %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1740 ret <8 x float> %evec
1743 declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64>, metadata, metadata)
1744 define <8 x float> @vuitofp_v8i64_v8f32(<8 x i64> %va) strictfp {
1745 ; CHECK-LABEL: vuitofp_v8i64_v8f32:
1747 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
1748 ; CHECK-NEXT: vfncvt.f.xu.w v12, v8
1749 ; CHECK-NEXT: vmv.v.v v8, v12
1751 %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1752 ret <8 x float> %evec
1755 declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64>, metadata, metadata)
1756 define <8 x double> @vsitofp_v8i64_v8f64(<8 x i64> %va) strictfp {
1757 ; CHECK-LABEL: vsitofp_v8i64_v8f64:
1759 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
1760 ; CHECK-NEXT: vfcvt.f.x.v v8, v8
1762 %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1763 ret <8 x double> %evec
1766 declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64>, metadata, metadata)
1767 define <8 x double> @vuitofp_v8i64_v8f64(<8 x i64> %va) strictfp {
1768 ; CHECK-LABEL: vuitofp_v8i64_v8f64:
1770 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
1771 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8
1773 %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
1774 ret <8 x double> %evec