; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv64 -mattr=+v -enable-subreg-liveness < %s | FileCheck %s
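
; vrgather has an earlyclobber destination: even with both sources undef,
; the result register must not overlap them (v8 vs. v9 below).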
define <vscale x 2 x float> @vrgather_all_undef(ptr %p) {
; CHECK-LABEL: vrgather_all_undef:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma
; CHECK-NEXT: vrgather.vi v8, v9, 0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> undef, i64 0, i64 0)
  ret <vscale x 2 x float> %0
}
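
; Same all-undef vrgather, but its result is live into a store, so the
; earlyclobber def chained off undef operands must still get a valid register.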
define dso_local signext i32 @undef_early_clobber_chain() {
; CHECK-LABEL: undef_early_clobber_chain:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -400
; CHECK-NEXT: .cfi_def_cfa_offset 400
; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma
; CHECK-NEXT: vrgather.vi v9, v8, 0
; CHECK-NEXT: mv a0, sp
; CHECK-NEXT: vse32.v v9, (a0)
; CHECK-NEXT: li a0, 0
; CHECK-NEXT: addi sp, sp, 400
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
  %dst = alloca [100 x float], align 8
  call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %dst) #4
  %0 = tail call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> undef, i64 0, i64 0)
  call void @llvm.riscv.vse.nxv2f32.i64(<vscale x 2 x float> %0, ptr nonnull %dst, i64 0)
  call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %dst) #4
  ret i32 0
}
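
; Partially-undef values (nxv1i16 inserted into poison nxv8i16) merged by
; phis: subregister liveness must track the undefined lanes of the phi
; inputs correctly when they feed earlyclobber vrgatherei16 defs.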
define internal void @SubRegLivenessUndefInPhi(i64 %cond) {
; CHECK-LABEL: SubRegLivenessUndefInPhi:
; CHECK: # %bb.0: # %start
; CHECK-NEXT: blez a0, .LBB2_2
; CHECK-NEXT: # %bb.1: # %Cond1
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vid.v v8
; CHECK-NEXT: vadd.vi v12, v8, 1
; CHECK-NEXT: vadd.vi v10, v8, 3
; CHECK-NEXT: j .LBB2_3
; CHECK-NEXT: .LBB2_2: # %Cond2
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: vadd.vi v10, v9, 1
; CHECK-NEXT: vadd.vi v11, v9, 3
; CHECK-NEXT: add a1, a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: vslideup.vx v12, v10, a0
; CHECK-NEXT: vslideup.vx v10, v11, a0
; CHECK-NEXT: .LBB2_3: # %UseSR
; CHECK-NEXT: vl1r.v v14, (zero)
; CHECK-NEXT: vsetivli zero, 4, e8, m1, ta, ma
; CHECK-NEXT: vrgatherei16.vv v15, v14, v8
; CHECK-NEXT: vrgatherei16.vv v8, v14, v12
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v15, v8
; CHECK-NEXT: vsetivli zero, 4, e8, m1, ta, ma
; CHECK-NEXT: vrgatherei16.vv v9, v14, v10
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9
; CHECK-NEXT: vs1r.v v8, (zero)
; CHECK-NEXT: ret
start:
  %0 = icmp sgt i64 %cond, 0
  br i1 %0, label %Cond1, label %Cond2

Cond1: ; preds = %start
  %v15 = tail call <vscale x 1 x i16> @llvm.stepvector.nxv1i16()
  %v17 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %v15, i64 0)
  %vs12.i.i.i = add <vscale x 1 x i16> %v15, splat (i16 1)
  %v18 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs12.i.i.i, i64 0)
  %vs16.i.i.i = add <vscale x 1 x i16> %v15, splat (i16 3)
  %v20 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs16.i.i.i, i64 0)
  br label %UseSR

Cond2: ; preds = %start
  %v15.2 = tail call <vscale x 1 x i16> @llvm.stepvector.nxv1i16()
  %v17.2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %v15.2, i64 1)
  %vs12.i.i.i.2 = add <vscale x 1 x i16> %v15.2, splat (i16 1)
  %v18.2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs12.i.i.i.2, i64 1)
  %vs16.i.i.i.2 = add <vscale x 1 x i16> %v15.2, splat (i16 3)
  %v20.2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs16.i.i.i.2, i64 1)
  br label %UseSR

UseSR: ; preds = %Cond1, %Cond2
  %v17.3 = phi <vscale x 8 x i16> [ %v17, %Cond1 ], [ %v17.2, %Cond2 ]
  %v18.3 = phi <vscale x 8 x i16> [ %v18, %Cond1 ], [ %v18.2, %Cond2 ]
  %v20.3 = phi <vscale x 8 x i16> [ %v20, %Cond1 ], [ %v20.2, %Cond2 ]
  %v37 = load <vscale x 8 x i8>, ptr addrspace(1) null, align 8
  %v38 = tail call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> %v37, <vscale x 8 x i16> %v17.3, i64 4)
  %v40 = tail call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> %v37, <vscale x 8 x i16> %v18.3, i64 4)
  %v42 = and <vscale x 8 x i8> %v38, %v40
  %v46 = tail call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> %v37, <vscale x 8 x i16> %v20.3, i64 4)
  %v60 = and <vscale x 8 x i8> %v42, %v46
  store <vscale x 8 x i8> %v60, ptr addrspace(1) null, align 4
  ret void
}
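
; Same partially-undef vrgatherei16 indices as above, but defined once in
; the preheader and reused across a loop rather than merged through phis.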
define internal void @SubRegLivenessUndef() {
; CHECK-LABEL: SubRegLivenessUndef:
; CHECK: # %bb.0: # %loopIR.preheader.i.i
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vid.v v8
; CHECK-NEXT: vadd.vi v10, v8, 1
; CHECK-NEXT: vadd.vi v12, v8, 3
; CHECK-NEXT: .LBB3_1: # %loopIR3.i.i
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vl1r.v v14, (zero)
; CHECK-NEXT: vsetivli zero, 4, e8, m1, ta, ma
; CHECK-NEXT: vrgatherei16.vv v15, v14, v8
; CHECK-NEXT: vrgatherei16.vv v9, v14, v10
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vand.vv v9, v15, v9
; CHECK-NEXT: vsetivli zero, 4, e8, m1, ta, ma
; CHECK-NEXT: vrgatherei16.vv v11, v14, v12
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vand.vv v9, v9, v11
; CHECK-NEXT: vs1r.v v9, (zero)
; CHECK-NEXT: j .LBB3_1
loopIR.preheader.i.i:
  %v15 = tail call <vscale x 1 x i16> @llvm.stepvector.nxv1i16()
  %v17 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %v15, i64 0)
  %vs12.i.i.i = add <vscale x 1 x i16> %v15, splat (i16 1)
  %v18 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs12.i.i.i, i64 0)
  %vs16.i.i.i = add <vscale x 1 x i16> %v15, splat (i16 3)
  %v20 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs16.i.i.i, i64 0)
  br label %loopIR3.i.i

loopIR3.i.i: ; preds = %loopIR3.i.i, %loopIR.preheader.i.i
  %v37 = load <vscale x 8 x i8>, ptr addrspace(1) null, align 8
  %v38 = tail call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> %v37, <vscale x 8 x i16> %v17, i64 4)
  %v40 = tail call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> %v37, <vscale x 8 x i16> %v18, i64 4)
  %v42 = and <vscale x 8 x i8> %v38, %v40
  %v46 = tail call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> %v37, <vscale x 8 x i16> %v20, i64 4)
  %v60 = and <vscale x 8 x i8> %v42, %v46
  store <vscale x 8 x i8> %v60, ptr addrspace(1) null, align 4
  br label %loopIR3.i.i
}

declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float>, <vscale x 2 x float>, i64, i64) #2
declare void @llvm.riscv.vse.nxv2f32.i64(<vscale x 2 x float>, ptr nocapture, i64)
declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
declare <vscale x 1 x i16> @llvm.stepvector.nxv1i16()
declare <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16>, <vscale x 1 x i16>, i64 immarg)
declare <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i16>, i64)
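
; Not an undef case: a repeating fixed-length shuffle should lower to a copy
; of the source plus a single vslideup onto it.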
define void @repeat_shuffle(<2 x double> %v, ptr noalias %q) {
; CHECK-LABEL: repeat_shuffle:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vslideup.vi v10, v8, 2
; CHECK-NEXT: vse64.v v10, (a0)
; CHECK-NEXT: ret
  %w = shufflevector <2 x double> %v, <2 x double> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
  store <4 x double> %w, ptr %q
  ret void
}