; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O2 -mtriple riscv64 -mattr=+v,+m,+zbb -enable-subreg-liveness \
; RUN:   -verify-machineinstrs < %s \
; RUN:   | FileCheck %s
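; A note on intent (inferred from the body below, not stated explicitly): the
; function loads several LMUL=2 vector register groups, clobbers every vector
; register with an empty inline asm, then reloads the groups for a masked
; vsext and a vsseg4 store, exercising spill/reload of vector register groups
; under -enable-subreg-liveness.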
@var_47 = dso_local global [2 x i16] [i16 -32732, i16 19439], align 2
@__const._Z3foov.var_49 = private unnamed_addr constant [2 x i16] [i16 157, i16 24062], align 2
@__const._Z3foov.var_48 = private unnamed_addr constant [2 x i8] c"\AEN", align 1
@__const._Z3foov.var_46 = private unnamed_addr constant [2 x i16] [i16 729, i16 -32215], align 2
@__const._Z3foov.var_45 = private unnamed_addr constant [2 x i16] [i16 -27462, i16 -1435], align 2
@__const._Z3foov.var_44 = private unnamed_addr constant [2 x i16] [i16 22611, i16 -18435], align 2
@__const._Z3foov.var_40 = private unnamed_addr constant [2 x i16] [i16 -19932, i16 -26252], align 2

define void @_Z3foov() {
; CHECK-LABEL: _Z3foov:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 10
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x0a, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 10 * vlenb
; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_49)
; CHECK-NEXT:    addi a0, a0, %lo(.L__const._Z3foov.var_49)
; CHECK-NEXT:    vsetivli zero, 2, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_48)
; CHECK-NEXT:    addi a0, a0, %lo(.L__const._Z3foov.var_48)
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vs1r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_46)
; CHECK-NEXT:    addi a0, a0, %lo(.L__const._Z3foov.var_46)
; CHECK-NEXT:    vle16.v v10, (a0)
; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_45)
; CHECK-NEXT:    addi a0, a0, %lo(.L__const._Z3foov.var_45)
; CHECK-NEXT:    vle16.v v12, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vs2r.v v12, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vs2r.v v14, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_40)
; CHECK-NEXT:    addi a0, a0, %lo(.L__const._Z3foov.var_40)
; CHECK-NEXT:    vsetivli zero, 2, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_44)
; CHECK-NEXT:    addi a0, a0, %lo(.L__const._Z3foov.var_44)
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 1
; CHECK-NEXT:    vl2r.v v10, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    add a1, a1, a2
; CHECK-NEXT:    vl2r.v v12, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    add a1, a1, a2
; CHECK-NEXT:    vl2r.v v14, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    add a1, a1, a2
; CHECK-NEXT:    vl2r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    lui a0, 1048572
; CHECK-NEXT:    addi a0, a0, 928
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
; CHECK-NEXT:    vsext.vf2 v10, v8, v0.t
; CHECK-NEXT:    lui a0, %hi(var_47)
; CHECK-NEXT:    addi a0, a0, %lo(var_47)
; CHECK-NEXT:    vsseg4e16.v v10, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 10
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_49, i64 2)
  %1 = tail call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8> undef, ptr nonnull @__const._Z3foov.var_48, i64 2)
  %2 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_46, i64 2)
  %3 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_45, i64 2)
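  ; The empty asm below clobbers v0-v31, so every vector value live across it
  ; (%0-%3 above) has to be spilled before this point and reloaded after it,
  ; as the folded spill/reload checks above show.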
  tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() #2
  %4 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_44, i64 2)
  %5 = tail call i64 @llvm.riscv.vsetvli.i64(i64 2, i64 1, i64 1)
  %6 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_40, i64 2)
  %7 = tail call i64 @llvm.riscv.vsetvli.i64(i64 2, i64 1, i64 1)
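  ; vmsbc.vx compares %6 against the scalar -15456 (0xc3a0 in the low 16
  ; bits); in the checks above the constant is materialized as
  ; "lui a0, 1048572" + "addi a0, a0, 928".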
  %8 = tail call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16> %6, i16 -15456, i64 2)
  %9 = tail call i64 @llvm.riscv.vsetvli.i64(i64 2, i64 1, i64 1)
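  ; Masked sign-extend from i8 to i16 under the mask produced by vmsbc; the
  ; trailing "i64 0" policy operand requests tail-undisturbed and
  ; mask-undisturbed, which is why the checks above use
  ; "vsetvli zero, zero, e16, m2, tu, mu".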
  %10 = tail call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %8, i64 2, i64 0)
  tail call void @llvm.riscv.vsseg4.nxv8i16.i64(<vscale x 8 x i16> %10, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, <vscale x 8 x i16> %4, ptr nonnull @var_47, i64 2)
  ret void
}

declare <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16>, ptr nocapture, i64)

declare <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8>, ptr nocapture, i64)

declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg)

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16>, i16, i64)

declare <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64 immarg)

declare void @llvm.riscv.vsseg4.nxv8i16.i64(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, ptr nocapture, i64)
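
; The inline asm call above references attribute group #2, whose definition
; was not part of this extract; a minimal definition (an assumption) so the
; file assembles:
attributes #2 = { nounwind }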