; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVFH32
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVFH64
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN32
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN64

define void @si2fp_v2i32_v2f32(ptr %x, ptr %y) {
; CHECK-LABEL: si2fp_v2i32_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x i32>, ptr %x
  %d = sitofp <2 x i32> %a to <2 x float>
  store <2 x float> %d, ptr %y
  ret void
}

define void @ui2fp_v2i32_v2f32(ptr %x, ptr %y) {
; CHECK-LABEL: ui2fp_v2i32_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x i32>, ptr %x
  %d = uitofp <2 x i32> %a to <2 x float>
  store <2 x float> %d, ptr %y
  ret void
}

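; There is no direct convert from i1: the mask in v0 is first materialized as
; an integer vector (0/-1 for sitofp, 0/1 for uitofp) using vmv.v.i +
; vmerge.vim, and that vector is then converted with a single widening convert.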
define <2 x float> @si2fp_v2i1_v2f32(<2 x i1> %x) {
; CHECK-LABEL: si2fp_v2i1_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %z = sitofp <2 x i1> %x to <2 x float>
  ret <2 x float> %z
}

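; The i7 elements are stored in i8 lanes and sign-extended in place: vadd.vv of
; a value with itself shifts left by one, and the arithmetic shift right by one
; then replicates bit 6 into the top bit.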
define <2 x float> @si2fp_v2i7_v2f32(<2 x i7> %x) {
; CHECK-LABEL: si2fp_v2i7_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vsra.vi v8, v8, 1
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %z = sitofp <2 x i7> %x to <2 x float>
  ret <2 x float> %z
}

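; The unsigned variant zero-extends the i7 payload in place by masking with
; 127 (0x7f) before widening.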
define <2 x float> @ui2fp_v2i7_v2f32(<2 x i7> %x) {
; CHECK-LABEL: ui2fp_v2i7_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 127
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %z = uitofp <2 x i7> %x to <2 x float>
  ret <2 x float> %z
}

define <2 x float> @ui2fp_v2i1_v2f32(<2 x i1> %x) {
; CHECK-LABEL: ui2fp_v2i1_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %z = uitofp <2 x i1> %x to <2 x float>
  ret <2 x float> %z
}

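; <3 x i32> is not a legal type: it is widened to <4 x i32>, so the convert
; runs at VL=4 while the load and store keep the exact VL=3.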
define void @si2fp_v3i32_v3f32(ptr %x, ptr %y) {
; CHECK-LABEL: si2fp_v3i32_v3f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <3 x i32>, ptr %x
  %d = sitofp <3 x i32> %a to <3 x float>
  store <3 x float> %d, ptr %y
  ret void
}

define void @ui2fp_v3i32_v3f32(ptr %x, ptr %y) {
; CHECK-LABEL: ui2fp_v3i32_v3f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <3 x i32>, ptr %x
  %d = uitofp <3 x i32> %a to <3 x float>
  store <3 x float> %d, ptr %y
  ret void
}

define <3 x float> @si2fp_v3i1_v3f32(<3 x i1> %x) {
; CHECK-LABEL: si2fp_v3i1_v3f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    ret
  %z = sitofp <3 x i1> %x to <3 x float>
  ret <3 x float> %z
}

; FIXME: This gets expanded instead of widened + promoted
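; A <3 x i7> argument is passed indirectly, one element per XLEN-sized slot in
; memory, so the scalar loads below rebuild the vector with vslide1down before
; the in-register sign extension and conversion.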
define <3 x float> @si2fp_v3i7_v3f32(<3 x i7> %x) {
; ZVFH32-LABEL: si2fp_v3i7_v3f32:
; ZVFH32:       # %bb.0:
; ZVFH32-NEXT:    lw a1, 0(a0)
; ZVFH32-NEXT:    lw a2, 4(a0)
; ZVFH32-NEXT:    lw a0, 8(a0)
; ZVFH32-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZVFH32-NEXT:    vmv.v.x v8, a1
; ZVFH32-NEXT:    vslide1down.vx v8, v8, a2
; ZVFH32-NEXT:    vslide1down.vx v8, v8, a0
; ZVFH32-NEXT:    vslidedown.vi v8, v8, 1
; ZVFH32-NEXT:    vadd.vv v8, v8, v8
; ZVFH32-NEXT:    vsra.vi v8, v8, 1
; ZVFH32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFH32-NEXT:    vsext.vf2 v9, v8
; ZVFH32-NEXT:    vfwcvt.f.x.v v8, v9
; ZVFH32-NEXT:    ret
;
; ZVFH64-LABEL: si2fp_v3i7_v3f32:
; ZVFH64:       # %bb.0:
; ZVFH64-NEXT:    ld a1, 0(a0)
; ZVFH64-NEXT:    ld a2, 8(a0)
; ZVFH64-NEXT:    ld a0, 16(a0)
; ZVFH64-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZVFH64-NEXT:    vmv.v.x v8, a1
; ZVFH64-NEXT:    vslide1down.vx v8, v8, a2
; ZVFH64-NEXT:    vslide1down.vx v8, v8, a0
; ZVFH64-NEXT:    vslidedown.vi v8, v8, 1
; ZVFH64-NEXT:    vadd.vv v8, v8, v8
; ZVFH64-NEXT:    vsra.vi v8, v8, 1
; ZVFH64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFH64-NEXT:    vsext.vf2 v9, v8
; ZVFH64-NEXT:    vfwcvt.f.x.v v8, v9
; ZVFH64-NEXT:    ret
;
; ZVFHMIN32-LABEL: si2fp_v3i7_v3f32:
; ZVFHMIN32:       # %bb.0:
; ZVFHMIN32-NEXT:    lw a1, 0(a0)
; ZVFHMIN32-NEXT:    lw a2, 4(a0)
; ZVFHMIN32-NEXT:    lw a0, 8(a0)
; ZVFHMIN32-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZVFHMIN32-NEXT:    vmv.v.x v8, a1
; ZVFHMIN32-NEXT:    vslide1down.vx v8, v8, a2
; ZVFHMIN32-NEXT:    vslide1down.vx v8, v8, a0
; ZVFHMIN32-NEXT:    vslidedown.vi v8, v8, 1
; ZVFHMIN32-NEXT:    vadd.vv v8, v8, v8
; ZVFHMIN32-NEXT:    vsra.vi v8, v8, 1
; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN32-NEXT:    vsext.vf2 v9, v8
; ZVFHMIN32-NEXT:    vfwcvt.f.x.v v8, v9
; ZVFHMIN32-NEXT:    ret
;
; ZVFHMIN64-LABEL: si2fp_v3i7_v3f32:
; ZVFHMIN64:       # %bb.0:
; ZVFHMIN64-NEXT:    ld a1, 0(a0)
; ZVFHMIN64-NEXT:    ld a2, 8(a0)
; ZVFHMIN64-NEXT:    ld a0, 16(a0)
; ZVFHMIN64-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZVFHMIN64-NEXT:    vmv.v.x v8, a1
; ZVFHMIN64-NEXT:    vslide1down.vx v8, v8, a2
; ZVFHMIN64-NEXT:    vslide1down.vx v8, v8, a0
; ZVFHMIN64-NEXT:    vslidedown.vi v8, v8, 1
; ZVFHMIN64-NEXT:    vadd.vv v8, v8, v8
; ZVFHMIN64-NEXT:    vsra.vi v8, v8, 1
; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN64-NEXT:    vsext.vf2 v9, v8
; ZVFHMIN64-NEXT:    vfwcvt.f.x.v v8, v9
; ZVFHMIN64-NEXT:    ret
  %z = sitofp <3 x i7> %x to <3 x float>
  ret <3 x float> %z
}

; FIXME: This gets expanded instead of widened + promoted
define <3 x float> @ui2fp_v3i7_v3f32(<3 x i7> %x) {
; ZVFH32-LABEL: ui2fp_v3i7_v3f32:
; ZVFH32:       # %bb.0:
; ZVFH32-NEXT:    lw a1, 0(a0)
; ZVFH32-NEXT:    lw a2, 4(a0)
; ZVFH32-NEXT:    lw a0, 8(a0)
; ZVFH32-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZVFH32-NEXT:    vmv.v.x v8, a1
; ZVFH32-NEXT:    vslide1down.vx v8, v8, a2
; ZVFH32-NEXT:    vslide1down.vx v8, v8, a0
; ZVFH32-NEXT:    vslidedown.vi v8, v8, 1
; ZVFH32-NEXT:    li a0, 127
; ZVFH32-NEXT:    vand.vx v8, v8, a0
; ZVFH32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFH32-NEXT:    vzext.vf2 v9, v8
; ZVFH32-NEXT:    vfwcvt.f.xu.v v8, v9
; ZVFH32-NEXT:    ret
;
; ZVFH64-LABEL: ui2fp_v3i7_v3f32:
; ZVFH64:       # %bb.0:
; ZVFH64-NEXT:    ld a1, 0(a0)
; ZVFH64-NEXT:    ld a2, 8(a0)
; ZVFH64-NEXT:    ld a0, 16(a0)
; ZVFH64-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZVFH64-NEXT:    vmv.v.x v8, a1
; ZVFH64-NEXT:    vslide1down.vx v8, v8, a2
; ZVFH64-NEXT:    vslide1down.vx v8, v8, a0
; ZVFH64-NEXT:    vslidedown.vi v8, v8, 1
; ZVFH64-NEXT:    li a0, 127
; ZVFH64-NEXT:    vand.vx v8, v8, a0
; ZVFH64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFH64-NEXT:    vzext.vf2 v9, v8
; ZVFH64-NEXT:    vfwcvt.f.xu.v v8, v9
; ZVFH64-NEXT:    ret
;
; ZVFHMIN32-LABEL: ui2fp_v3i7_v3f32:
; ZVFHMIN32:       # %bb.0:
; ZVFHMIN32-NEXT:    lw a1, 0(a0)
; ZVFHMIN32-NEXT:    lw a2, 4(a0)
; ZVFHMIN32-NEXT:    lw a0, 8(a0)
; ZVFHMIN32-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZVFHMIN32-NEXT:    vmv.v.x v8, a1
; ZVFHMIN32-NEXT:    vslide1down.vx v8, v8, a2
; ZVFHMIN32-NEXT:    vslide1down.vx v8, v8, a0
; ZVFHMIN32-NEXT:    vslidedown.vi v8, v8, 1
; ZVFHMIN32-NEXT:    li a0, 127
; ZVFHMIN32-NEXT:    vand.vx v8, v8, a0
; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN32-NEXT:    vzext.vf2 v9, v8
; ZVFHMIN32-NEXT:    vfwcvt.f.xu.v v8, v9
; ZVFHMIN32-NEXT:    ret
;
; ZVFHMIN64-LABEL: ui2fp_v3i7_v3f32:
; ZVFHMIN64:       # %bb.0:
; ZVFHMIN64-NEXT:    ld a1, 0(a0)
; ZVFHMIN64-NEXT:    ld a2, 8(a0)
; ZVFHMIN64-NEXT:    ld a0, 16(a0)
; ZVFHMIN64-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZVFHMIN64-NEXT:    vmv.v.x v8, a1
; ZVFHMIN64-NEXT:    vslide1down.vx v8, v8, a2
; ZVFHMIN64-NEXT:    vslide1down.vx v8, v8, a0
; ZVFHMIN64-NEXT:    vslidedown.vi v8, v8, 1
; ZVFHMIN64-NEXT:    li a0, 127
; ZVFHMIN64-NEXT:    vand.vx v8, v8, a0
; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN64-NEXT:    vzext.vf2 v9, v8
; ZVFHMIN64-NEXT:    vfwcvt.f.xu.v v8, v9
; ZVFHMIN64-NEXT:    ret
  %z = uitofp <3 x i7> %x to <3 x float>
  ret <3 x float> %z
}

define <3 x float> @ui2fp_v3i1_v3f32(<3 x i1> %x) {
; CHECK-LABEL: ui2fp_v3i1_v3f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    ret
  %z = uitofp <3 x i1> %x to <3 x float>
  ret <3 x float> %z
}

define void @si2fp_v8i32_v8f32(ptr %x, ptr %y) {
; CHECK-LABEL: si2fp_v8i32_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.f.x.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %d = sitofp <8 x i32> %a to <8 x float>
  store <8 x float> %d, ptr %y
  ret void
}

define void @ui2fp_v8i32_v8f32(ptr %x, ptr %y) {
; CHECK-LABEL: ui2fp_v8i32_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %d = uitofp <8 x i32> %a to <8 x float>
  store <8 x float> %d, ptr %y
  ret void
}

define <8 x float> @si2fp_v8i1_v8f32(<8 x i1> %x) {
; CHECK-LABEL: si2fp_v8i1_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
; CHECK-NEXT:    ret
  %z = sitofp <8 x i1> %x to <8 x float>
  ret <8 x float> %z
}

define <8 x float> @ui2fp_v8i1_v8f32(<8 x i1> %x) {
; CHECK-LABEL: ui2fp_v8i1_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
; CHECK-NEXT:    ret
  %z = uitofp <8 x i1> %x to <8 x float>
  ret <8 x float> %z
}

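; Converting i16 to f64 quadruples the element width, which takes two steps:
; a vsext.vf2/vzext.vf2 to i32 followed by a single widening convert to f64.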
define void @si2fp_v2i16_v2f64(ptr %x, ptr %y) {
; CHECK-LABEL: si2fp_v2i16_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x i16>, ptr %x
  %d = sitofp <2 x i16> %a to <2 x double>
  store <2 x double> %d, ptr %y
  ret void
}

define void @ui2fp_v2i16_v2f64(ptr %x, ptr %y) {
; CHECK-LABEL: ui2fp_v2i16_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x i16>, ptr %x
  %d = uitofp <2 x i16> %a to <2 x double>
  store <2 x double> %d, ptr %y
  ret void
}

define void @si2fp_v8i16_v8f64(ptr %x, ptr %y) {
; CHECK-LABEL: si2fp_v8i16_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.x.v v12, v10
; CHECK-NEXT:    vse64.v v12, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i16>, ptr %x
  %d = sitofp <8 x i16> %a to <8 x double>
  store <8 x double> %d, ptr %y
  ret void
}

define void @ui2fp_v8i16_v8f64(ptr %x, ptr %y) {
; CHECK-LABEL: ui2fp_v8i16_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vfwcvt.f.xu.v v12, v10
; CHECK-NEXT:    vse64.v v12, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i16>, ptr %x
  %d = uitofp <8 x i16> %a to <8 x double>
  store <8 x double> %d, ptr %y
  ret void
}

define <8 x double> @si2fp_v8i1_v8f64(<8 x i1> %x) {
; CHECK-LABEL: si2fp_v8i1_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
; CHECK-NEXT:    ret
  %z = sitofp <8 x i1> %x to <8 x double>
  ret <8 x double> %z
}

define <8 x double> @ui2fp_v8i1_v8f64(<8 x i1> %x) {
; CHECK-LABEL: ui2fp_v8i1_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
; CHECK-NEXT:    ret
  %z = uitofp <8 x i1> %x to <8 x double>
  ret <8 x double> %z
}

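; Converting i64 to f16 quarters the element width, which also takes two
; steps: vfncvt.f.x.w (or .xu.w) narrows to f32, then vfncvt.f.f.w narrows
; to f16.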
define void @si2fp_v2i64_v2f16(ptr %x, ptr %y) {
; CHECK-LABEL: si2fp_v2i64_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfncvt.f.x.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v9
; CHECK-NEXT:    vse16.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x i64>, ptr %x
  %d = sitofp <2 x i64> %a to <2 x half>
  store <2 x half> %d, ptr %y
  ret void
}

define void @ui2fp_v2i64_v2f16(ptr %x, ptr %y) {
; CHECK-LABEL: ui2fp_v2i64_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v9
; CHECK-NEXT:    vse16.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x i64>, ptr %x
  %d = uitofp <2 x i64> %a to <2 x half>
  store <2 x half> %d, ptr %y
  ret void
}

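; Zvfhmin has no integer converts that produce f16, so the ZVFHMIN lowering
; converts the materialized integer vector to f32 with vfwcvt and narrows the
; result to f16 with vfncvt.f.f.w.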
define <2 x half> @si2fp_v2i1_v2f16(<2 x i1> %x) {
; ZVFH-LABEL: si2fp_v2i1_v2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; ZVFH-NEXT:    vmv.v.i v8, 0
; ZVFH-NEXT:    vmerge.vim v9, v8, -1, v0
; ZVFH-NEXT:    vfwcvt.f.x.v v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: si2fp_v2i1_v2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vmv.v.i v8, 0
; ZVFHMIN-NEXT:    vmerge.vim v8, v8, -1, v0
; ZVFHMIN-NEXT:    vfwcvt.f.x.v v9, v8
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %z = sitofp <2 x i1> %x to <2 x half>
  ret <2 x half> %z
}

define <2 x half> @ui2fp_v2i1_v2f16(<2 x i1> %x) {
; ZVFH-LABEL: ui2fp_v2i1_v2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; ZVFH-NEXT:    vmv.v.i v8, 0
; ZVFH-NEXT:    vmerge.vim v9, v8, 1, v0
; ZVFH-NEXT:    vfwcvt.f.xu.v v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: ui2fp_v2i1_v2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vmv.v.i v8, 0
; ZVFHMIN-NEXT:    vmerge.vim v8, v8, 1, v0
; ZVFHMIN-NEXT:    vfwcvt.f.xu.v v9, v8
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %z = uitofp <2 x i1> %x to <2 x half>
  ret <2 x half> %z
}

define void @si2fp_v8i64_v8f16(ptr %x, ptr %y) {
; CHECK-LABEL: si2fp_v8i64_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfncvt.f.x.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v12
; CHECK-NEXT:    vse16.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i64>, ptr %x
  %d = sitofp <8 x i64> %a to <8 x half>
  store <8 x half> %d, ptr %y
  ret void
}

define void @ui2fp_v8i64_v8f16(ptr %x, ptr %y) {
; CHECK-LABEL: ui2fp_v8i64_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v12
; CHECK-NEXT:    vse16.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i64>, ptr %x
  %d = uitofp <8 x i64> %a to <8 x half>
  store <8 x half> %d, ptr %y
  ret void
}

define <8 x half> @si2fp_v8i1_v8f16(<8 x i1> %x) {
; ZVFH-LABEL: si2fp_v8i1_v8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFH-NEXT:    vmv.v.i v8, 0
; ZVFH-NEXT:    vmerge.vim v9, v8, -1, v0
; ZVFH-NEXT:    vfwcvt.f.x.v v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: si2fp_v8i1_v8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.v.i v8, 0
; ZVFHMIN-NEXT:    vmerge.vim v8, v8, -1, v0
; ZVFHMIN-NEXT:    vfwcvt.f.x.v v10, v8
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT:    ret
  %z = sitofp <8 x i1> %x to <8 x half>
  ret <8 x half> %z
}

define <8 x half> @ui2fp_v8i1_v8f16(<8 x i1> %x) {
; ZVFH-LABEL: ui2fp_v8i1_v8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFH-NEXT:    vmv.v.i v8, 0
; ZVFH-NEXT:    vmerge.vim v9, v8, 1, v0
; ZVFH-NEXT:    vfwcvt.f.xu.v v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: ui2fp_v8i1_v8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.v.i v8, 0
; ZVFHMIN-NEXT:    vmerge.vim v8, v8, 1, v0
; ZVFHMIN-NEXT:    vfwcvt.f.xu.v v10, v8
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT:    ret
  %z = uitofp <8 x i1> %x to <8 x half>
  ret <8 x half> %z
}