1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+zvfh -verify-machineinstrs \
3 ; RUN: -target-abi=ilp32d < %s | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh -verify-machineinstrs \
5 ; RUN: -target-abi=lp64d < %s | FileCheck %s --check-prefixes=CHECK,RV64
6 ; RUN: llc -mtriple=riscv32 -mattr=+zve32f,+zvl128b,+d,+zfh,+zvfh \
7 ; RUN: -verify-machineinstrs -target-abi=ilp32d < %s | FileCheck %s \
8 ; RUN: --check-prefixes=ELEN32,RV32ELEN32
9 ; RUN: llc -mtriple=riscv64 -mattr=+zve32f,+zvl128b,+d,+zfh,+zvfh \
10 ; RUN: -verify-machineinstrs -target-abi=lp64d < %s | FileCheck %s \
11 ; RUN: --check-prefixes=ELEN32,RV64ELEN32
; Vector-to-mask bitcast: <4 x i8> (32 bits) reinterpreted as a <32 x i1> mask and
; XORed with %b. Checked lowering is a single vmxor.mm — no data movement needed,
; since v0 already holds the same 32 bits.
13 define <32 x i1> @bitcast_v4i8_v32i1(<4 x i8> %a, <32 x i1> %b) {
14 ; CHECK-LABEL: bitcast_v4i8_v32i1:
16 ; CHECK-NEXT: li a0, 32
17 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
18 ; CHECK-NEXT: vmxor.mm v0, v0, v8
21 ; ELEN32-LABEL: bitcast_v4i8_v32i1:
23 ; ELEN32-NEXT: li a0, 32
24 ; ELEN32-NEXT: vsetvli zero, a0, e8, m2, ta, ma
25 ; ELEN32-NEXT: vmxor.mm v0, v0, v8
27 %c = bitcast <4 x i8> %a to <32 x i1>
28 %d = xor <32 x i1> %b, %c
; <1 x i8> -> i8: same-width vector-to-scalar bitcast lowers to a single
; vmv.x.s element-0 extract on all configurations.
32 define i8 @bitcast_v1i8_i8(<1 x i8> %a) {
33 ; CHECK-LABEL: bitcast_v1i8_i8:
35 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
36 ; CHECK-NEXT: vmv.x.s a0, v8
39 ; ELEN32-LABEL: bitcast_v1i8_i8:
41 ; ELEN32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
42 ; ELEN32-NEXT: vmv.x.s a0, v8
44 %b = bitcast <1 x i8> %a to i8
; <2 x i8> -> i16: extracted as one e16 element (vmv.x.s) after an SEW change,
; avoiding any per-byte shuffling.
48 define i16 @bitcast_v2i8_i16(<2 x i8> %a) {
49 ; CHECK-LABEL: bitcast_v2i8_i16:
51 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
52 ; CHECK-NEXT: vmv.x.s a0, v8
55 ; ELEN32-LABEL: bitcast_v2i8_i16:
57 ; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
58 ; ELEN32-NEXT: vmv.x.s a0, v8
60 %b = bitcast <2 x i8> %a to i16
; <1 x i16> -> i16: single vmv.x.s extract at e16.
64 define i16 @bitcast_v1i16_i16(<1 x i16> %a) {
65 ; CHECK-LABEL: bitcast_v1i16_i16:
67 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
68 ; CHECK-NEXT: vmv.x.s a0, v8
71 ; ELEN32-LABEL: bitcast_v1i16_i16:
73 ; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
74 ; ELEN32-NEXT: vmv.x.s a0, v8
76 %b = bitcast <1 x i16> %a to i16
; <4 x i8> -> i32: the 32-bit payload is read back as one e32 element.
80 define i32 @bitcast_v4i8_i32(<4 x i8> %a) {
81 ; CHECK-LABEL: bitcast_v4i8_i32:
83 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
84 ; CHECK-NEXT: vmv.x.s a0, v8
87 ; ELEN32-LABEL: bitcast_v4i8_i32:
89 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
90 ; ELEN32-NEXT: vmv.x.s a0, v8
92 %b = bitcast <4 x i8> %a to i32
; <2 x i16> -> i32: same single e32 vmv.x.s pattern as the i8 case above.
96 define i32 @bitcast_v2i16_i32(<2 x i16> %a) {
97 ; CHECK-LABEL: bitcast_v2i16_i32:
99 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
100 ; CHECK-NEXT: vmv.x.s a0, v8
103 ; ELEN32-LABEL: bitcast_v2i16_i32:
105 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
106 ; ELEN32-NEXT: vmv.x.s a0, v8
108 %b = bitcast <2 x i16> %a to i32
; <1 x i32> -> i32: single vmv.x.s extract at e32.
112 define i32 @bitcast_v1i32_i32(<1 x i32> %a) {
113 ; CHECK-LABEL: bitcast_v1i32_i32:
115 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
116 ; CHECK-NEXT: vmv.x.s a0, v8
119 ; ELEN32-LABEL: bitcast_v1i32_i32:
121 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
122 ; ELEN32-NEXT: vmv.x.s a0, v8
124 %b = bitcast <1 x i32> %a to i32
; <8 x i8> -> i64. Four distinct lowerings:
;   RV32       — i64 returned in a1:a0; high half obtained via a 32-bit vsrl.vx at e64.
;   RV64       — single e64 vmv.x.s.
;   RV32ELEN32 — e64 illegal (ELEN=32): extract the two e32 halves with
;                vmv.x.s + vslidedown.vi.
;   RV64ELEN32 — no e64 elements available, so the vector is spilled with vse8.v
;                and reloaded as a scalar ld from the stack.
128 define i64 @bitcast_v8i8_i64(<8 x i8> %a) {
129 ; RV32-LABEL: bitcast_v8i8_i64:
131 ; RV32-NEXT: li a0, 32
132 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
133 ; RV32-NEXT: vsrl.vx v9, v8, a0
134 ; RV32-NEXT: vmv.x.s a1, v9
135 ; RV32-NEXT: vmv.x.s a0, v8
138 ; RV64-LABEL: bitcast_v8i8_i64:
140 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
141 ; RV64-NEXT: vmv.x.s a0, v8
144 ; RV32ELEN32-LABEL: bitcast_v8i8_i64:
145 ; RV32ELEN32: # %bb.0:
146 ; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
147 ; RV32ELEN32-NEXT: vmv.x.s a0, v8
148 ; RV32ELEN32-NEXT: vslidedown.vi v8, v8, 1
149 ; RV32ELEN32-NEXT: vmv.x.s a1, v8
150 ; RV32ELEN32-NEXT: ret
152 ; RV64ELEN32-LABEL: bitcast_v8i8_i64:
153 ; RV64ELEN32: # %bb.0:
154 ; RV64ELEN32-NEXT: addi sp, sp, -16
155 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16
156 ; RV64ELEN32-NEXT: addi a0, sp, 8
157 ; RV64ELEN32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
158 ; RV64ELEN32-NEXT: vse8.v v8, (a0)
159 ; RV64ELEN32-NEXT: ld a0, 8(sp)
160 ; RV64ELEN32-NEXT: addi sp, sp, 16
161 ; RV64ELEN32-NEXT: ret
162 %b = bitcast <8 x i8> %a to i64
; <4 x i16> -> i64: same four-way lowering as bitcast_v8i8_i64, with the
; RV64ELEN32 spill done at e16 (vse16.v).
166 define i64 @bitcast_v4i16_i64(<4 x i16> %a) {
167 ; RV32-LABEL: bitcast_v4i16_i64:
169 ; RV32-NEXT: li a0, 32
170 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
171 ; RV32-NEXT: vsrl.vx v9, v8, a0
172 ; RV32-NEXT: vmv.x.s a1, v9
173 ; RV32-NEXT: vmv.x.s a0, v8
176 ; RV64-LABEL: bitcast_v4i16_i64:
178 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
179 ; RV64-NEXT: vmv.x.s a0, v8
182 ; RV32ELEN32-LABEL: bitcast_v4i16_i64:
183 ; RV32ELEN32: # %bb.0:
184 ; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
185 ; RV32ELEN32-NEXT: vmv.x.s a0, v8
186 ; RV32ELEN32-NEXT: vslidedown.vi v8, v8, 1
187 ; RV32ELEN32-NEXT: vmv.x.s a1, v8
188 ; RV32ELEN32-NEXT: ret
190 ; RV64ELEN32-LABEL: bitcast_v4i16_i64:
191 ; RV64ELEN32: # %bb.0:
192 ; RV64ELEN32-NEXT: addi sp, sp, -16
193 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16
194 ; RV64ELEN32-NEXT: addi a0, sp, 8
195 ; RV64ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
196 ; RV64ELEN32-NEXT: vse16.v v8, (a0)
197 ; RV64ELEN32-NEXT: ld a0, 8(sp)
198 ; RV64ELEN32-NEXT: addi sp, sp, 16
199 ; RV64ELEN32-NEXT: ret
200 %b = bitcast <4 x i16> %a to i64
; <2 x i32> -> i64: same four-way lowering as bitcast_v8i8_i64, with the
; RV64ELEN32 spill done at e32 (vse32.v).
204 define i64 @bitcast_v2i32_i64(<2 x i32> %a) {
205 ; RV32-LABEL: bitcast_v2i32_i64:
207 ; RV32-NEXT: li a0, 32
208 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
209 ; RV32-NEXT: vsrl.vx v9, v8, a0
210 ; RV32-NEXT: vmv.x.s a1, v9
211 ; RV32-NEXT: vmv.x.s a0, v8
214 ; RV64-LABEL: bitcast_v2i32_i64:
216 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
217 ; RV64-NEXT: vmv.x.s a0, v8
220 ; RV32ELEN32-LABEL: bitcast_v2i32_i64:
221 ; RV32ELEN32: # %bb.0:
222 ; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
223 ; RV32ELEN32-NEXT: vmv.x.s a0, v8
224 ; RV32ELEN32-NEXT: vslidedown.vi v8, v8, 1
225 ; RV32ELEN32-NEXT: vmv.x.s a1, v8
226 ; RV32ELEN32-NEXT: ret
228 ; RV64ELEN32-LABEL: bitcast_v2i32_i64:
229 ; RV64ELEN32: # %bb.0:
230 ; RV64ELEN32-NEXT: addi sp, sp, -16
231 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16
232 ; RV64ELEN32-NEXT: addi a0, sp, 8
233 ; RV64ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
234 ; RV64ELEN32-NEXT: vse32.v v8, (a0)
235 ; RV64ELEN32-NEXT: ld a0, 8(sp)
236 ; RV64ELEN32-NEXT: addi sp, sp, 16
237 ; RV64ELEN32-NEXT: ret
238 %b = bitcast <2 x i32> %a to i64
; <1 x i64> -> i64. RV32 splits the element into a1:a0 via vsrl.vx; RV64 is a
; single e64 vmv.x.s. For ELEN32 only the label is checked here — with e64
; illegal, <1 x i64> is presumably passed in scalar registers already, so no
; instructions are expected (body checks not visible in this chunk; confirm
; against the regenerated file).
242 define i64 @bitcast_v1i64_i64(<1 x i64> %a) {
243 ; RV32-LABEL: bitcast_v1i64_i64:
245 ; RV32-NEXT: li a0, 32
246 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
247 ; RV32-NEXT: vsrl.vx v9, v8, a0
248 ; RV32-NEXT: vmv.x.s a1, v9
249 ; RV32-NEXT: vmv.x.s a0, v8
252 ; RV64-LABEL: bitcast_v1i64_i64:
254 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
255 ; RV64-NEXT: vmv.x.s a0, v8
258 ; ELEN32-LABEL: bitcast_v1i64_i64:
261 %b = bitcast <1 x i64> %a to i64
; <2 x i8> -> half: extracted with vfmv.f.s at e16. Note the LMUL difference:
; mf4 with ELEN=64 vs mf2 with ELEN=32 (smallest legal fraction differs).
265 define half @bitcast_v2i8_f16(<2 x i8> %a) {
266 ; CHECK-LABEL: bitcast_v2i8_f16:
268 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
269 ; CHECK-NEXT: vfmv.f.s fa0, v8
272 ; ELEN32-LABEL: bitcast_v2i8_f16:
274 ; ELEN32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
275 ; ELEN32-NEXT: vfmv.f.s fa0, v8
277 %b = bitcast <2 x i8> %a to half
; <1 x i16> -> half: single vfmv.f.s at e16 (mf4 vs mf2 per ELEN, as above).
281 define half @bitcast_v1i16_f16(<1 x i16> %a) {
282 ; CHECK-LABEL: bitcast_v1i16_f16:
284 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
285 ; CHECK-NEXT: vfmv.f.s fa0, v8
288 ; ELEN32-LABEL: bitcast_v1i16_f16:
290 ; ELEN32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
291 ; ELEN32-NEXT: vfmv.f.s fa0, v8
293 %b = bitcast <1 x i16> %a to half
; <4 x i8> -> float: single vfmv.f.s at e32 (mf2 with ELEN=64, m1 with ELEN=32).
297 define float @bitcast_v4i8_f32(<4 x i8> %a) {
298 ; CHECK-LABEL: bitcast_v4i8_f32:
300 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
301 ; CHECK-NEXT: vfmv.f.s fa0, v8
304 ; ELEN32-LABEL: bitcast_v4i8_f32:
306 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
307 ; ELEN32-NEXT: vfmv.f.s fa0, v8
309 %b = bitcast <4 x i8> %a to float
; <2 x i16> -> float: single vfmv.f.s at e32.
313 define float @bitcast_v2i16_f32(<2 x i16> %a) {
314 ; CHECK-LABEL: bitcast_v2i16_f32:
316 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
317 ; CHECK-NEXT: vfmv.f.s fa0, v8
320 ; ELEN32-LABEL: bitcast_v2i16_f32:
322 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
323 ; ELEN32-NEXT: vfmv.f.s fa0, v8
325 %b = bitcast <2 x i16> %a to float
; <1 x i32> -> float: single vfmv.f.s at e32.
329 define float @bitcast_v1i32_f32(<1 x i32> %a) {
330 ; CHECK-LABEL: bitcast_v1i32_f32:
332 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
333 ; CHECK-NEXT: vfmv.f.s fa0, v8
336 ; ELEN32-LABEL: bitcast_v1i32_f32:
338 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
339 ; ELEN32-NEXT: vfmv.f.s fa0, v8
341 %b = bitcast <1 x i32> %a to float
; <8 x i8> -> double. With e64 legal this is a single vfmv.f.s; with ELEN=32
; the vector is stored to the stack (vse8.v) and reloaded as an FP scalar (fld).
345 define double @bitcast_v8i8_f64(<8 x i8> %a) {
346 ; CHECK-LABEL: bitcast_v8i8_f64:
348 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
349 ; CHECK-NEXT: vfmv.f.s fa0, v8
352 ; ELEN32-LABEL: bitcast_v8i8_f64:
354 ; ELEN32-NEXT: addi sp, sp, -16
355 ; ELEN32-NEXT: .cfi_def_cfa_offset 16
356 ; ELEN32-NEXT: addi a0, sp, 8
357 ; ELEN32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
358 ; ELEN32-NEXT: vse8.v v8, (a0)
359 ; ELEN32-NEXT: fld fa0, 8(sp)
360 ; ELEN32-NEXT: addi sp, sp, 16
362 %b = bitcast <8 x i8> %a to double
; <4 x i16> -> double: same shape as bitcast_v8i8_f64; ELEN32 spills at e16.
366 define double @bitcast_v4i16_f64(<4 x i16> %a) {
367 ; CHECK-LABEL: bitcast_v4i16_f64:
369 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
370 ; CHECK-NEXT: vfmv.f.s fa0, v8
373 ; ELEN32-LABEL: bitcast_v4i16_f64:
375 ; ELEN32-NEXT: addi sp, sp, -16
376 ; ELEN32-NEXT: .cfi_def_cfa_offset 16
377 ; ELEN32-NEXT: addi a0, sp, 8
378 ; ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
379 ; ELEN32-NEXT: vse16.v v8, (a0)
380 ; ELEN32-NEXT: fld fa0, 8(sp)
381 ; ELEN32-NEXT: addi sp, sp, 16
383 %b = bitcast <4 x i16> %a to double
; <2 x i32> -> double: same shape as bitcast_v8i8_f64; ELEN32 spills at e32.
387 define double @bitcast_v2i32_f64(<2 x i32> %a) {
388 ; CHECK-LABEL: bitcast_v2i32_f64:
390 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
391 ; CHECK-NEXT: vfmv.f.s fa0, v8
394 ; ELEN32-LABEL: bitcast_v2i32_f64:
396 ; ELEN32-NEXT: addi sp, sp, -16
397 ; ELEN32-NEXT: .cfi_def_cfa_offset 16
398 ; ELEN32-NEXT: addi a0, sp, 8
399 ; ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
400 ; ELEN32-NEXT: vse32.v v8, (a0)
401 ; ELEN32-NEXT: fld fa0, 8(sp)
402 ; ELEN32-NEXT: addi sp, sp, 16
404 %b = bitcast <2 x i32> %a to double
; <1 x i64> -> double. With e64 legal: vfmv.f.s. With ELEN=32 the i64 vector
; arrives in scalar registers: RV32ELEN32 stores a0/a1 and flds the pair
; (no fmv path for a 32-bit GPR pair), RV64ELEN32 is a single fmv.d.x.
408 define double @bitcast_v1i64_f64(<1 x i64> %a) {
409 ; CHECK-LABEL: bitcast_v1i64_f64:
411 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
412 ; CHECK-NEXT: vfmv.f.s fa0, v8
415 ; RV32ELEN32-LABEL: bitcast_v1i64_f64:
416 ; RV32ELEN32: # %bb.0:
417 ; RV32ELEN32-NEXT: addi sp, sp, -16
418 ; RV32ELEN32-NEXT: .cfi_def_cfa_offset 16
419 ; RV32ELEN32-NEXT: sw a0, 8(sp)
420 ; RV32ELEN32-NEXT: sw a1, 12(sp)
421 ; RV32ELEN32-NEXT: fld fa0, 8(sp)
422 ; RV32ELEN32-NEXT: addi sp, sp, 16
423 ; RV32ELEN32-NEXT: ret
425 ; RV64ELEN32-LABEL: bitcast_v1i64_f64:
426 ; RV64ELEN32: # %bb.0:
427 ; RV64ELEN32-NEXT: fmv.d.x fa0, a0
428 ; RV64ELEN32-NEXT: ret
429 %b = bitcast <1 x i64> %a to double
; Reverse direction: i16 -> <1 x i16> is a single vmv.s.x scalar insert.
433 define <1 x i16> @bitcast_i16_v1i16(i16 %a) {
434 ; CHECK-LABEL: bitcast_i16_v1i16:
436 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
437 ; CHECK-NEXT: vmv.s.x v8, a0
440 ; ELEN32-LABEL: bitcast_i16_v1i16:
442 ; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
443 ; ELEN32-NEXT: vmv.s.x v8, a0
445 %b = bitcast i16 %a to <1 x i16>
; i32 -> <2 x i16>: the whole 32-bit value is inserted as one e32 element.
449 define <2 x i16> @bitcast_i32_v2i16(i32 %a) {
450 ; CHECK-LABEL: bitcast_i32_v2i16:
452 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
453 ; CHECK-NEXT: vmv.s.x v8, a0
456 ; ELEN32-LABEL: bitcast_i32_v2i16:
458 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
459 ; ELEN32-NEXT: vmv.s.x v8, a0
461 %b = bitcast i32 %a to <2 x i16>
; i32 -> <1 x i32>: single vmv.s.x insert at e32.
465 define <1 x i32> @bitcast_i32_v1i32(i32 %a) {
466 ; CHECK-LABEL: bitcast_i32_v1i32:
468 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
469 ; CHECK-NEXT: vmv.s.x v8, a0
472 ; ELEN32-LABEL: bitcast_i32_v1i32:
474 ; ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
475 ; ELEN32-NEXT: vmv.s.x v8, a0
477 %b = bitcast i32 %a to <1 x i32>
; i64 -> <4 x i16>. RV32 builds the vector from the a0/a1 pair with vslide1down;
; RV64 inserts the whole value at e64 with vmv.s.x; RV32ELEN32 uses vmv.v.x +
; vslide1down at e32; RV64ELEN32 has no e64, so it stores the scalar (sd) and
; reloads it as a vector with vle16.v.
481 define <4 x i16> @bitcast_i64_v4i16(i64 %a) {
482 ; RV32-LABEL: bitcast_i64_v4i16:
484 ; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
485 ; RV32-NEXT: vslide1down.vx v8, v8, a0
486 ; RV32-NEXT: vslide1down.vx v8, v8, a1
489 ; RV64-LABEL: bitcast_i64_v4i16:
491 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
492 ; RV64-NEXT: vmv.s.x v8, a0
495 ; RV32ELEN32-LABEL: bitcast_i64_v4i16:
496 ; RV32ELEN32: # %bb.0:
497 ; RV32ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
498 ; RV32ELEN32-NEXT: vmv.v.x v8, a0
499 ; RV32ELEN32-NEXT: vslide1down.vx v8, v8, a1
500 ; RV32ELEN32-NEXT: ret
502 ; RV64ELEN32-LABEL: bitcast_i64_v4i16:
503 ; RV64ELEN32: # %bb.0:
504 ; RV64ELEN32-NEXT: addi sp, sp, -16
505 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16
506 ; RV64ELEN32-NEXT: sd a0, 8(sp)
507 ; RV64ELEN32-NEXT: addi a0, sp, 8
508 ; RV64ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
509 ; RV64ELEN32-NEXT: vle16.v v8, (a0)
510 ; RV64ELEN32-NEXT: addi sp, sp, 16
511 ; RV64ELEN32-NEXT: ret
512 %b = bitcast i64 %a to <4 x i16>
; i64 -> <2 x i32>: same four-way lowering as bitcast_i64_v4i16, with the
; RV64ELEN32 reload done at e32 (vle32.v).
516 define <2 x i32> @bitcast_i64_v2i32(i64 %a) {
517 ; RV32-LABEL: bitcast_i64_v2i32:
519 ; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
520 ; RV32-NEXT: vslide1down.vx v8, v8, a0
521 ; RV32-NEXT: vslide1down.vx v8, v8, a1
524 ; RV64-LABEL: bitcast_i64_v2i32:
526 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
527 ; RV64-NEXT: vmv.s.x v8, a0
530 ; RV32ELEN32-LABEL: bitcast_i64_v2i32:
531 ; RV32ELEN32: # %bb.0:
532 ; RV32ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
533 ; RV32ELEN32-NEXT: vmv.v.x v8, a0
534 ; RV32ELEN32-NEXT: vslide1down.vx v8, v8, a1
535 ; RV32ELEN32-NEXT: ret
537 ; RV64ELEN32-LABEL: bitcast_i64_v2i32:
538 ; RV64ELEN32: # %bb.0:
539 ; RV64ELEN32-NEXT: addi sp, sp, -16
540 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16
541 ; RV64ELEN32-NEXT: sd a0, 8(sp)
542 ; RV64ELEN32-NEXT: addi a0, sp, 8
543 ; RV64ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
544 ; RV64ELEN32-NEXT: vle32.v v8, (a0)
545 ; RV64ELEN32-NEXT: addi sp, sp, 16
546 ; RV64ELEN32-NEXT: ret
547 %b = bitcast i64 %a to <2 x i32>
; i64 -> <1 x i64>. RV32 assembles the element from a0/a1 with two vslide1down;
; RV64 is a single e64 vmv.s.x. For ELEN32 only the label is checked here —
; with e64 illegal, <1 x i64> is presumably kept in scalar registers, so no
; instructions are expected (body checks not visible in this chunk; confirm
; against the regenerated file).
551 define <1 x i64> @bitcast_i64_v1i64(i64 %a) {
552 ; RV32-LABEL: bitcast_i64_v1i64:
554 ; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
555 ; RV32-NEXT: vslide1down.vx v8, v8, a0
556 ; RV32-NEXT: vslide1down.vx v8, v8, a1
559 ; RV64-LABEL: bitcast_i64_v1i64:
561 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
562 ; RV64-NEXT: vmv.s.x v8, a0
565 ; ELEN32-LABEL: bitcast_i64_v1i64:
568 %b = bitcast i64 %a to <1 x i64>