1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zvfh,+zfbfmin,+zvfbfmin -verify-machineinstrs \
3 ; RUN: -target-abi=ilp32d < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32
4 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfh,+zfbfmin,+zvfbfmin -verify-machineinstrs \
5 ; RUN: -target-abi=lp64d < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64
6 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin -verify-machineinstrs \
7 ; RUN: -target-abi=ilp32d < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32
8 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin -verify-machineinstrs \
9 ; RUN: -target-abi=lp64d < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64
10 ; RUN: llc -mtriple=riscv32 -mattr=+zve32f,+zvl128b,+d,+zvfh,+zfbfmin,+zvfbfmin \
11 ; RUN: -verify-machineinstrs -target-abi=ilp32d < %s | FileCheck %s \
12 ; RUN: --check-prefixes=ELEN32,RV32ELEN32
13 ; RUN: llc -mtriple=riscv64 -mattr=+zve32f,+zvl128b,+d,+zvfh,+zfbfmin,+zvfbfmin \
14 ; RUN: -verify-machineinstrs -target-abi=lp64d < %s | FileCheck %s \
15 ; RUN: --check-prefixes=ELEN32,RV64ELEN32
; <4 x i8> -> <32 x i1> bitcast; xor-ing with the incoming mask %b keeps the
; result live in v0, so the checks can observe it via vmxor.mm.
17 define <32 x i1> @bitcast_v4i8_v32i1(<4 x i8> %a, <32 x i1> %b) {
18 ; CHECK-LABEL: bitcast_v4i8_v32i1:
20 ; CHECK-NEXT: li a0, 32
21 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
22 ; CHECK-NEXT: vmxor.mm v0, v0, v8
25 ; ELEN32-LABEL: bitcast_v4i8_v32i1:
27 ; ELEN32-NEXT: li a0, 32
28 ; ELEN32-NEXT: vsetvli zero, a0, e8, m2, ta, ma
29 ; ELEN32-NEXT: vmxor.mm v0, v0, v8
31 %c = bitcast <4 x i8> %a to <32 x i1>
32 %d = xor <32 x i1> %b, %c
; <1 x i8> -> i8: checks expect a single e8 element extract (vmv.x.s).
36 define i8 @bitcast_v1i8_i8(<1 x i8> %a) {
37 ; CHECK-LABEL: bitcast_v1i8_i8:
39 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
40 ; CHECK-NEXT: vmv.x.s a0, v8
43 ; ELEN32-LABEL: bitcast_v1i8_i8:
45 ; ELEN32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
46 ; ELEN32-NEXT: vmv.x.s a0, v8
48 %b = bitcast <1 x i8> %a to i8
; <2 x i8> -> i16: reinterpreted as one e16 element and extracted with vmv.x.s.
52 define i16 @bitcast_v2i8_i16(<2 x i8> %a) {
53 ; CHECK-LABEL: bitcast_v2i8_i16:
55 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
56 ; CHECK-NEXT: vmv.x.s a0, v8
59 ; ELEN32-LABEL: bitcast_v2i8_i16:
61 ; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
62 ; ELEN32-NEXT: vmv.x.s a0, v8
64 %b = bitcast <2 x i8> %a to i16
; <1 x i16> -> i16: trivial case, single e16 element extract.
68 define i16 @bitcast_v1i16_i16(<1 x i16> %a) {
69 ; CHECK-LABEL: bitcast_v1i16_i16:
71 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
72 ; CHECK-NEXT: vmv.x.s a0, v8
75 ; ELEN32-LABEL: bitcast_v1i16_i16:
77 ; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
78 ; ELEN32-NEXT: vmv.x.s a0, v8
80 %b = bitcast <1 x i16> %a to i16
; <4 x i8> -> i32: reinterpreted as one e32 element and extracted with vmv.x.s.
84 define i32 @bitcast_v4i8_i32(<4 x i8> %a) {
85 ; CHECK-LABEL: bitcast_v4i8_i32:
87 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
88 ; CHECK-NEXT: vmv.x.s a0, v8
91 ; ELEN32-LABEL: bitcast_v4i8_i32:
93 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
94 ; ELEN32-NEXT: vmv.x.s a0, v8
96 %b = bitcast <4 x i8> %a to i32
; <2 x i16> -> i32: reinterpreted as one e32 element and extracted with vmv.x.s.
100 define i32 @bitcast_v2i16_i32(<2 x i16> %a) {
101 ; CHECK-LABEL: bitcast_v2i16_i32:
103 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
104 ; CHECK-NEXT: vmv.x.s a0, v8
107 ; ELEN32-LABEL: bitcast_v2i16_i32:
109 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
110 ; ELEN32-NEXT: vmv.x.s a0, v8
112 %b = bitcast <2 x i16> %a to i32
; <1 x i32> -> i32: trivial case, single e32 element extract.
116 define i32 @bitcast_v1i32_i32(<1 x i32> %a) {
117 ; CHECK-LABEL: bitcast_v1i32_i32:
119 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
120 ; CHECK-NEXT: vmv.x.s a0, v8
123 ; ELEN32-LABEL: bitcast_v1i32_i32:
125 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
126 ; ELEN32-NEXT: vmv.x.s a0, v8
128 %b = bitcast <1 x i32> %a to i32
; <8 x i8> -> i64. Four distinct lowerings are checked:
;  - RV32:       i64 returned in a0/a1, so the e64 element is split with a
;                32-bit vsrl.vx for the high half plus two vmv.x.s extracts.
;  - RV64:       single e64 vmv.x.s.
;  - RV32ELEN32: +zve32f has no e64, so two e32 extracts (vmv.x.s + vslidedown).
;  - RV64ELEN32: no e64 vectors but a 64-bit GPR, so the vector is stored to
;                the stack with vse8.v and reloaded as one ld.
132 define i64 @bitcast_v8i8_i64(<8 x i8> %a) {
133 ; RV32-LABEL: bitcast_v8i8_i64:
135 ; RV32-NEXT: li a0, 32
136 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
137 ; RV32-NEXT: vsrl.vx v9, v8, a0
138 ; RV32-NEXT: vmv.x.s a1, v9
139 ; RV32-NEXT: vmv.x.s a0, v8
142 ; RV64-LABEL: bitcast_v8i8_i64:
144 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
145 ; RV64-NEXT: vmv.x.s a0, v8
148 ; RV32ELEN32-LABEL: bitcast_v8i8_i64:
149 ; RV32ELEN32: # %bb.0:
150 ; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
151 ; RV32ELEN32-NEXT: vmv.x.s a0, v8
152 ; RV32ELEN32-NEXT: vslidedown.vi v8, v8, 1
153 ; RV32ELEN32-NEXT: vmv.x.s a1, v8
154 ; RV32ELEN32-NEXT: ret
156 ; RV64ELEN32-LABEL: bitcast_v8i8_i64:
157 ; RV64ELEN32: # %bb.0:
158 ; RV64ELEN32-NEXT: addi sp, sp, -16
159 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16
160 ; RV64ELEN32-NEXT: addi a0, sp, 8
161 ; RV64ELEN32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
162 ; RV64ELEN32-NEXT: vse8.v v8, (a0)
163 ; RV64ELEN32-NEXT: ld a0, 8(sp)
164 ; RV64ELEN32-NEXT: addi sp, sp, 16
165 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 0
166 ; RV64ELEN32-NEXT: ret
167 %b = bitcast <8 x i8> %a to i64
; <4 x i16> -> i64: same four lowerings as bitcast_v8i8_i64, with the
; RV64ELEN32 stack round-trip using vse16.v instead of vse8.v.
171 define i64 @bitcast_v4i16_i64(<4 x i16> %a) {
172 ; RV32-LABEL: bitcast_v4i16_i64:
174 ; RV32-NEXT: li a0, 32
175 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
176 ; RV32-NEXT: vsrl.vx v9, v8, a0
177 ; RV32-NEXT: vmv.x.s a1, v9
178 ; RV32-NEXT: vmv.x.s a0, v8
181 ; RV64-LABEL: bitcast_v4i16_i64:
183 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
184 ; RV64-NEXT: vmv.x.s a0, v8
187 ; RV32ELEN32-LABEL: bitcast_v4i16_i64:
188 ; RV32ELEN32: # %bb.0:
189 ; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
190 ; RV32ELEN32-NEXT: vmv.x.s a0, v8
191 ; RV32ELEN32-NEXT: vslidedown.vi v8, v8, 1
192 ; RV32ELEN32-NEXT: vmv.x.s a1, v8
193 ; RV32ELEN32-NEXT: ret
195 ; RV64ELEN32-LABEL: bitcast_v4i16_i64:
196 ; RV64ELEN32: # %bb.0:
197 ; RV64ELEN32-NEXT: addi sp, sp, -16
198 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16
199 ; RV64ELEN32-NEXT: addi a0, sp, 8
200 ; RV64ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
201 ; RV64ELEN32-NEXT: vse16.v v8, (a0)
202 ; RV64ELEN32-NEXT: ld a0, 8(sp)
203 ; RV64ELEN32-NEXT: addi sp, sp, 16
204 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 0
205 ; RV64ELEN32-NEXT: ret
206 %b = bitcast <4 x i16> %a to i64
; <2 x i32> -> i64: same four lowerings as bitcast_v8i8_i64, with the
; RV64ELEN32 stack round-trip using vse32.v.
210 define i64 @bitcast_v2i32_i64(<2 x i32> %a) {
211 ; RV32-LABEL: bitcast_v2i32_i64:
213 ; RV32-NEXT: li a0, 32
214 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
215 ; RV32-NEXT: vsrl.vx v9, v8, a0
216 ; RV32-NEXT: vmv.x.s a1, v9
217 ; RV32-NEXT: vmv.x.s a0, v8
220 ; RV64-LABEL: bitcast_v2i32_i64:
222 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
223 ; RV64-NEXT: vmv.x.s a0, v8
226 ; RV32ELEN32-LABEL: bitcast_v2i32_i64:
227 ; RV32ELEN32: # %bb.0:
228 ; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
229 ; RV32ELEN32-NEXT: vmv.x.s a0, v8
230 ; RV32ELEN32-NEXT: vslidedown.vi v8, v8, 1
231 ; RV32ELEN32-NEXT: vmv.x.s a1, v8
232 ; RV32ELEN32-NEXT: ret
234 ; RV64ELEN32-LABEL: bitcast_v2i32_i64:
235 ; RV64ELEN32: # %bb.0:
236 ; RV64ELEN32-NEXT: addi sp, sp, -16
237 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16
238 ; RV64ELEN32-NEXT: addi a0, sp, 8
239 ; RV64ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
240 ; RV64ELEN32-NEXT: vse32.v v8, (a0)
241 ; RV64ELEN32-NEXT: ld a0, 8(sp)
242 ; RV64ELEN32-NEXT: addi sp, sp, 16
243 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 0
244 ; RV64ELEN32-NEXT: ret
245 %b = bitcast <2 x i32> %a to i64
; <1 x i64> -> i64: RV32 splits the e64 element into a0/a1 via vsrl.vx 32;
; RV64 is a single e64 vmv.x.s. (Only the ELEN32 label survives in this
; extract; the autogenerated ELEN32 body lines are not visible here.)
249 define i64 @bitcast_v1i64_i64(<1 x i64> %a) {
250 ; RV32-LABEL: bitcast_v1i64_i64:
252 ; RV32-NEXT: li a0, 32
253 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
254 ; RV32-NEXT: vsrl.vx v9, v8, a0
255 ; RV32-NEXT: vmv.x.s a1, v9
256 ; RV32-NEXT: vmv.x.s a0, v8
259 ; RV64-LABEL: bitcast_v1i64_i64:
261 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
262 ; RV64-NEXT: vmv.x.s a0, v8
265 ; ELEN32-LABEL: bitcast_v1i64_i64:
268 %b = bitcast <1 x i64> %a to i64
; <2 x i8> -> bfloat: extracted as an e16 integer then moved to the FP
; register with fmv.h.x (bf16 has no direct vector FP extract here).
272 define bfloat @bitcast_v2i8_bf16(<2 x i8> %a) {
273 ; CHECK-LABEL: bitcast_v2i8_bf16:
275 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
276 ; CHECK-NEXT: vmv.x.s a0, v8
277 ; CHECK-NEXT: fmv.h.x fa0, a0
280 ; ELEN32-LABEL: bitcast_v2i8_bf16:
282 ; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
283 ; ELEN32-NEXT: vmv.x.s a0, v8
284 ; ELEN32-NEXT: fmv.h.x fa0, a0
286 %b = bitcast <2 x i8> %a to bfloat
; <1 x i16> -> bfloat: same vmv.x.s + fmv.h.x sequence as bitcast_v2i8_bf16.
290 define bfloat @bitcast_v1i16_bf16(<1 x i16> %a) {
291 ; CHECK-LABEL: bitcast_v1i16_bf16:
293 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
294 ; CHECK-NEXT: vmv.x.s a0, v8
295 ; CHECK-NEXT: fmv.h.x fa0, a0
298 ; ELEN32-LABEL: bitcast_v1i16_bf16:
300 ; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
301 ; ELEN32-NEXT: vmv.x.s a0, v8
302 ; ELEN32-NEXT: fmv.h.x fa0, a0
304 %b = bitcast <1 x i16> %a to bfloat
; <1 x bfloat> -> bfloat: extract goes through a GPR (vmv.x.s + fmv.h.x).
308 define bfloat @bitcast_v1bf16_bf16(<1 x bfloat> %a) {
309 ; CHECK-LABEL: bitcast_v1bf16_bf16:
311 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
312 ; CHECK-NEXT: vmv.x.s a0, v8
313 ; CHECK-NEXT: fmv.h.x fa0, a0
316 ; ELEN32-LABEL: bitcast_v1bf16_bf16:
318 ; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
319 ; ELEN32-NEXT: vmv.x.s a0, v8
320 ; ELEN32-NEXT: fmv.h.x fa0, a0
322 %b = bitcast <1 x bfloat> %a to bfloat
; bfloat -> <1 x bfloat> (scalar to single-element vector): moves the bf16
; bits to a GPR (fmv.x.h) and inserts with vmv.s.x.
326 define <1 x bfloat> @bitcast_bf16_v1bf16(bfloat %a) {
327 ; CHECK-LABEL: bitcast_bf16_v1bf16:
329 ; CHECK-NEXT: fmv.x.h a0, fa0
330 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
331 ; CHECK-NEXT: vmv.s.x v8, a0
334 ; ELEN32-LABEL: bitcast_bf16_v1bf16:
336 ; ELEN32-NEXT: fmv.x.h a0, fa0
337 ; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
338 ; ELEN32-NEXT: vmv.s.x v8, a0
340 %b = bitcast bfloat %a to <1 x bfloat>
; <2 x i8> -> half: with +zvfh a direct vfmv.f.s is expected; with only
; zvfhmin the extract detours through a GPR (vmv.x.s + fmv.h.x).
344 define half @bitcast_v2i8_f16(<2 x i8> %a) {
345 ; ZVFH-LABEL: bitcast_v2i8_f16:
347 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
348 ; ZVFH-NEXT: vfmv.f.s fa0, v8
351 ; ZVFHMIN-LABEL: bitcast_v2i8_f16:
353 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
354 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
355 ; ZVFHMIN-NEXT: fmv.h.x fa0, a0
358 ; ELEN32-LABEL: bitcast_v2i8_f16:
360 ; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
361 ; ELEN32-NEXT: vfmv.f.s fa0, v8
363 %b = bitcast <2 x i8> %a to half
; <1 x i16> -> half: ZVFH uses vfmv.f.s; ZVFHMIN goes via a GPR.
367 define half @bitcast_v1i16_f16(<1 x i16> %a) {
368 ; ZVFH-LABEL: bitcast_v1i16_f16:
370 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
371 ; ZVFH-NEXT: vfmv.f.s fa0, v8
374 ; ZVFHMIN-LABEL: bitcast_v1i16_f16:
376 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
377 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
378 ; ZVFHMIN-NEXT: fmv.h.x fa0, a0
381 ; ELEN32-LABEL: bitcast_v1i16_f16:
383 ; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
384 ; ELEN32-NEXT: vfmv.f.s fa0, v8
386 %b = bitcast <1 x i16> %a to half
; <1 x half> -> half: ZVFH extracts with vfmv.f.s; ZVFHMIN via GPR.
390 define half @bitcast_v1f16_f16(<1 x half> %a) {
391 ; ZVFH-LABEL: bitcast_v1f16_f16:
393 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
394 ; ZVFH-NEXT: vfmv.f.s fa0, v8
397 ; ZVFHMIN-LABEL: bitcast_v1f16_f16:
399 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
400 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
401 ; ZVFHMIN-NEXT: fmv.h.x fa0, a0
404 ; ELEN32-LABEL: bitcast_v1f16_f16:
406 ; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
407 ; ELEN32-NEXT: vfmv.f.s fa0, v8
409 %b = bitcast <1 x half> %a to half
; half -> <1 x half>: ZVFH inserts directly with vfmv.s.f; ZVFHMIN moves the
; bits through a GPR first (fmv.x.h + vmv.s.x).
413 define <1 x half> @bitcast_f16_v1f16(half %a) {
414 ; ZVFH-LABEL: bitcast_f16_v1f16:
416 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
417 ; ZVFH-NEXT: vfmv.s.f v8, fa0
420 ; ZVFHMIN-LABEL: bitcast_f16_v1f16:
422 ; ZVFHMIN-NEXT: fmv.x.h a0, fa0
423 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
424 ; ZVFHMIN-NEXT: vmv.s.x v8, a0
427 ; ELEN32-LABEL: bitcast_f16_v1f16:
429 ; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
430 ; ELEN32-NEXT: vfmv.s.f v8, fa0
432 %b = bitcast half %a to <1 x half>
; <4 x i8> -> float: single e32 FP extract (vfmv.f.s) on all configurations.
436 define float @bitcast_v4i8_f32(<4 x i8> %a) {
437 ; CHECK-LABEL: bitcast_v4i8_f32:
439 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
440 ; CHECK-NEXT: vfmv.f.s fa0, v8
443 ; ELEN32-LABEL: bitcast_v4i8_f32:
445 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
446 ; ELEN32-NEXT: vfmv.f.s fa0, v8
448 %b = bitcast <4 x i8> %a to float
; <2 x i16> -> float: single e32 FP extract (vfmv.f.s).
452 define float @bitcast_v2i16_f32(<2 x i16> %a) {
453 ; CHECK-LABEL: bitcast_v2i16_f32:
455 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
456 ; CHECK-NEXT: vfmv.f.s fa0, v8
459 ; ELEN32-LABEL: bitcast_v2i16_f32:
461 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
462 ; ELEN32-NEXT: vfmv.f.s fa0, v8
464 %b = bitcast <2 x i16> %a to float
; <1 x i32> -> float: single e32 FP extract (vfmv.f.s).
468 define float @bitcast_v1i32_f32(<1 x i32> %a) {
469 ; CHECK-LABEL: bitcast_v1i32_f32:
471 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
472 ; CHECK-NEXT: vfmv.f.s fa0, v8
475 ; ELEN32-LABEL: bitcast_v1i32_f32:
477 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
478 ; ELEN32-NEXT: vfmv.f.s fa0, v8
480 %b = bitcast <1 x i32> %a to float
; <8 x i8> -> double: with full V, a single e64 vfmv.f.s; with +zve32f
; (ELEN32, no e64 elements) the vector is stored to the stack with vse8.v
; and the double reloaded with fld.
484 define double @bitcast_v8i8_f64(<8 x i8> %a) {
485 ; CHECK-LABEL: bitcast_v8i8_f64:
487 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
488 ; CHECK-NEXT: vfmv.f.s fa0, v8
491 ; ELEN32-LABEL: bitcast_v8i8_f64:
493 ; ELEN32-NEXT: addi sp, sp, -16
494 ; ELEN32-NEXT: .cfi_def_cfa_offset 16
495 ; ELEN32-NEXT: addi a0, sp, 8
496 ; ELEN32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
497 ; ELEN32-NEXT: vse8.v v8, (a0)
498 ; ELEN32-NEXT: fld fa0, 8(sp)
499 ; ELEN32-NEXT: addi sp, sp, 16
500 ; ELEN32-NEXT: .cfi_def_cfa_offset 0
502 %b = bitcast <8 x i8> %a to double
; <4 x i16> -> double: same shapes as bitcast_v8i8_f64, ELEN32 spilling
; through the stack with vse16.v + fld.
506 define double @bitcast_v4i16_f64(<4 x i16> %a) {
507 ; CHECK-LABEL: bitcast_v4i16_f64:
509 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
510 ; CHECK-NEXT: vfmv.f.s fa0, v8
513 ; ELEN32-LABEL: bitcast_v4i16_f64:
515 ; ELEN32-NEXT: addi sp, sp, -16
516 ; ELEN32-NEXT: .cfi_def_cfa_offset 16
517 ; ELEN32-NEXT: addi a0, sp, 8
518 ; ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
519 ; ELEN32-NEXT: vse16.v v8, (a0)
520 ; ELEN32-NEXT: fld fa0, 8(sp)
521 ; ELEN32-NEXT: addi sp, sp, 16
522 ; ELEN32-NEXT: .cfi_def_cfa_offset 0
524 %b = bitcast <4 x i16> %a to double
; <2 x i32> -> double: same shapes as bitcast_v8i8_f64, ELEN32 spilling
; through the stack with vse32.v + fld.
528 define double @bitcast_v2i32_f64(<2 x i32> %a) {
529 ; CHECK-LABEL: bitcast_v2i32_f64:
531 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
532 ; CHECK-NEXT: vfmv.f.s fa0, v8
535 ; ELEN32-LABEL: bitcast_v2i32_f64:
537 ; ELEN32-NEXT: addi sp, sp, -16
538 ; ELEN32-NEXT: .cfi_def_cfa_offset 16
539 ; ELEN32-NEXT: addi a0, sp, 8
540 ; ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
541 ; ELEN32-NEXT: vse32.v v8, (a0)
542 ; ELEN32-NEXT: fld fa0, 8(sp)
543 ; ELEN32-NEXT: addi sp, sp, 16
544 ; ELEN32-NEXT: .cfi_def_cfa_offset 0
546 %b = bitcast <2 x i32> %a to double
; <1 x i64> -> double. With +zve32f a <1 x i64> is illegal as a vector, so the
; argument arrives in GPRs: RV32ELEN32 stores the a0/a1 pair and reloads with
; fld; RV64ELEN32 is a single fmv.d.x. Full-V configs use vfmv.f.s.
550 define double @bitcast_v1i64_f64(<1 x i64> %a) {
551 ; CHECK-LABEL: bitcast_v1i64_f64:
553 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
554 ; CHECK-NEXT: vfmv.f.s fa0, v8
557 ; RV32ELEN32-LABEL: bitcast_v1i64_f64:
558 ; RV32ELEN32: # %bb.0:
559 ; RV32ELEN32-NEXT: addi sp, sp, -16
560 ; RV32ELEN32-NEXT: .cfi_def_cfa_offset 16
561 ; RV32ELEN32-NEXT: sw a0, 8(sp)
562 ; RV32ELEN32-NEXT: sw a1, 12(sp)
563 ; RV32ELEN32-NEXT: fld fa0, 8(sp)
564 ; RV32ELEN32-NEXT: addi sp, sp, 16
565 ; RV32ELEN32-NEXT: .cfi_def_cfa_offset 0
566 ; RV32ELEN32-NEXT: ret
568 ; RV64ELEN32-LABEL: bitcast_v1i64_f64:
569 ; RV64ELEN32: # %bb.0:
570 ; RV64ELEN32-NEXT: fmv.d.x fa0, a0
571 ; RV64ELEN32-NEXT: ret
572 %b = bitcast <1 x i64> %a to double
; i16 -> <1 x i16>: single scalar insert (vmv.s.x).
576 define <1 x i16> @bitcast_i16_v1i16(i16 %a) {
577 ; CHECK-LABEL: bitcast_i16_v1i16:
579 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
580 ; CHECK-NEXT: vmv.s.x v8, a0
583 ; ELEN32-LABEL: bitcast_i16_v1i16:
585 ; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
586 ; ELEN32-NEXT: vmv.s.x v8, a0
588 %b = bitcast i16 %a to <1 x i16>
; i32 -> <2 x i16>: inserted as one e32 element (vmv.s.x).
592 define <2 x i16> @bitcast_i32_v2i16(i32 %a) {
593 ; CHECK-LABEL: bitcast_i32_v2i16:
595 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
596 ; CHECK-NEXT: vmv.s.x v8, a0
599 ; ELEN32-LABEL: bitcast_i32_v2i16:
601 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
602 ; ELEN32-NEXT: vmv.s.x v8, a0
604 %b = bitcast i32 %a to <2 x i16>
; i32 -> <1 x i32>: single e32 scalar insert (vmv.s.x).
608 define <1 x i32> @bitcast_i32_v1i32(i32 %a) {
609 ; CHECK-LABEL: bitcast_i32_v1i32:
611 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
612 ; CHECK-NEXT: vmv.s.x v8, a0
615 ; ELEN32-LABEL: bitcast_i32_v1i32:
617 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
618 ; ELEN32-NEXT: vmv.s.x v8, a0
620 %b = bitcast i32 %a to <1 x i32>
; i64 -> <4 x i16>. Four lowerings:
;  - RV32:       build from the a0/a1 pair with two vslide1down.vx.
;  - RV64:       single e64 vmv.s.x.
;  - RV32ELEN32: splat low half (vmv.v.x) then vslide1down.vx the high half.
;  - RV64ELEN32: sd the i64 to the stack and vle16.v it back (no e64 vectors).
624 define <4 x i16> @bitcast_i64_v4i16(i64 %a) {
625 ; RV32-LABEL: bitcast_i64_v4i16:
627 ; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
628 ; RV32-NEXT: vslide1down.vx v8, v8, a0
629 ; RV32-NEXT: vslide1down.vx v8, v8, a1
632 ; RV64-LABEL: bitcast_i64_v4i16:
634 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
635 ; RV64-NEXT: vmv.s.x v8, a0
638 ; RV32ELEN32-LABEL: bitcast_i64_v4i16:
639 ; RV32ELEN32: # %bb.0:
640 ; RV32ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
641 ; RV32ELEN32-NEXT: vmv.v.x v8, a0
642 ; RV32ELEN32-NEXT: vslide1down.vx v8, v8, a1
643 ; RV32ELEN32-NEXT: ret
645 ; RV64ELEN32-LABEL: bitcast_i64_v4i16:
646 ; RV64ELEN32: # %bb.0:
647 ; RV64ELEN32-NEXT: addi sp, sp, -16
648 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16
649 ; RV64ELEN32-NEXT: sd a0, 8(sp)
650 ; RV64ELEN32-NEXT: addi a0, sp, 8
651 ; RV64ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
652 ; RV64ELEN32-NEXT: vle16.v v8, (a0)
653 ; RV64ELEN32-NEXT: addi sp, sp, 16
654 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 0
655 ; RV64ELEN32-NEXT: ret
656 %b = bitcast i64 %a to <4 x i16>
; i64 -> <2 x i32>: same four lowerings as bitcast_i64_v4i16, with the
; RV64ELEN32 stack round-trip reloading via vle32.v.
660 define <2 x i32> @bitcast_i64_v2i32(i64 %a) {
661 ; RV32-LABEL: bitcast_i64_v2i32:
663 ; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
664 ; RV32-NEXT: vslide1down.vx v8, v8, a0
665 ; RV32-NEXT: vslide1down.vx v8, v8, a1
668 ; RV64-LABEL: bitcast_i64_v2i32:
670 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
671 ; RV64-NEXT: vmv.s.x v8, a0
674 ; RV32ELEN32-LABEL: bitcast_i64_v2i32:
675 ; RV32ELEN32: # %bb.0:
676 ; RV32ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
677 ; RV32ELEN32-NEXT: vmv.v.x v8, a0
678 ; RV32ELEN32-NEXT: vslide1down.vx v8, v8, a1
679 ; RV32ELEN32-NEXT: ret
681 ; RV64ELEN32-LABEL: bitcast_i64_v2i32:
682 ; RV64ELEN32: # %bb.0:
683 ; RV64ELEN32-NEXT: addi sp, sp, -16
684 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16
685 ; RV64ELEN32-NEXT: sd a0, 8(sp)
686 ; RV64ELEN32-NEXT: addi a0, sp, 8
687 ; RV64ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
688 ; RV64ELEN32-NEXT: vle32.v v8, (a0)
689 ; RV64ELEN32-NEXT: addi sp, sp, 16
690 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 0
691 ; RV64ELEN32-NEXT: ret
692 %b = bitcast i64 %a to <2 x i32>
696 define <1 x i64> @bitcast_i64_v1i64(i64 %a) {
697 ; RV32-LABEL: bitcast_i64_v1i64:
699 ; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
700 ; RV32-NEXT: vslide1down.vx v8, v8, a0
701 ; RV32-NEXT: vslide1down.vx v8, v8, a1
704 ; RV64-LABEL: bitcast_i64_v1i64:
706 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
707 ; RV64-NEXT: vmv.s.x v8, a0
710 ; ELEN32-LABEL: bitcast_i64_v1i64:
713 %b = bitcast i64 %a to <1 x i64>