; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s
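
; The <vscale x 128 x i1> result of the bitcast is wider than the largest
; legal mask type, so the extract goes through the stack: both mask halves
; are expanded to bytes with vmerge.vim, spilled, and the element is
; reloaded with the index clamped to the last byte of the spill area.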
define i1 @foo(<vscale x 16 x i8> %x, i64 %y) {
; CHECK-LABEL: foo:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a2, a1, 4
; CHECK-NEXT:    addi a2, a2, -1
; CHECK-NEXT:    bltu a0, a2, .LBB0_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:  .LBB0_2:
; CHECK-NEXT:    addi sp, sp, -80
; CHECK-NEXT:    .cfi_def_cfa_offset 80
; CHECK-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; CHECK-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
; CHECK-NEXT:    .cfi_offset ra, -8
; CHECK-NEXT:    .cfi_offset s0, -16
; CHECK-NEXT:    addi s0, sp, 80
; CHECK-NEXT:    .cfi_def_cfa s0, 0
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 4
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    andi sp, sp, -64
; CHECK-NEXT:    addi a2, sp, 64
; CHECK-NEXT:    add a0, a2, a0
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, a2, a1
; CHECK-NEXT:    vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v24, v16, 1, v0
; CHECK-NEXT:    vs8r.v v24, (a1)
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v16, 1, v0
; CHECK-NEXT:    vs8r.v v8, (a2)
; CHECK-NEXT:    lbu a0, 0(a0)
; CHECK-NEXT:    addi sp, s0, -80
; CHECK-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
; CHECK-NEXT:    addi sp, sp, 80
; CHECK-NEXT:    ret
  %a = bitcast <vscale x 16 x i8> %x to <vscale x 128 x i1>
  %b = extractelement <vscale x 128 x i1> %a, i64 %y
  ret i1 %b
}
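
; In the other direction the two mask registers already hold the byte data,
; so the extract only needs to rebuild the m2 group with vmv1r.v and
; vslidedown.vx by the index.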
define i8 @bar(<vscale x 128 x i1> %x, i64 %y) {
; CHECK-LABEL: bar:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v1, v8
; CHECK-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v0, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %a = bitcast <vscale x 128 x i1> %x to <vscale x 16 x i8>
  %b = extractelement <vscale x 16 x i8> %a, i64 %y
  ret i8 %b
}