; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,CHECK-128-65536
; RUN: llc -mtriple=riscv32 -riscv-v-vector-bits-max=512 \
; RUN:   -mattr=+v,+zvl512b -verify-machineinstrs \
; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,CHECK-512
; RUN: llc -mtriple=riscv32 -riscv-v-vector-bits-max=64 \
; RUN:   -mattr=+zve64x,+zvl64b -verify-machineinstrs \
; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,CHECK-64

declare <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  i64)
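
; The i64 scalar operand of vslide1down does not fit in a single RV32 GPR, so
; it is lowered as two SEW=32 vslide1down.vx instructions using the low (a0)
; and high (a1) halves of the scalar, with the requested VL doubled. The tests
; below check how that doubled VL is materialized for various constant VLs
; under the different known VLEN bounds set up by the RUN lines above.
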
define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl1(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; CHECK-NEXT: vslide1down.vx v8, v8, a0
; CHECK-NEXT: vslide1down.vx v8, v8, a1
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i64 1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2:
; CHECK-128-65536: # %bb.0: # %entry
; CHECK-128-65536-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1
; CHECK-128-65536-NEXT: ret
;
; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2:
; CHECK-512: # %bb.0: # %entry
; CHECK-512-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
; CHECK-512-NEXT: ret
;
; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
; CHECK-64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i64 2)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3:
; CHECK-128-65536: # %bb.0: # %entry
; CHECK-128-65536-NEXT: vsetivli a2, 3, e64, m1, ta, ma
; CHECK-128-65536-NEXT: slli a2, a2, 1
; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1
; CHECK-128-65536-NEXT: ret
;
; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3:
; CHECK-512: # %bb.0: # %entry
; CHECK-512-NEXT: vsetivli zero, 6, e32, m1, ta, ma
; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
; CHECK-512-NEXT: ret
;
; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
; CHECK-64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i64 3)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8:
; CHECK-128-65536: # %bb.0: # %entry
; CHECK-128-65536-NEXT: vsetivli a2, 8, e64, m1, ta, ma
; CHECK-128-65536-NEXT: slli a2, a2, 1
; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1
; CHECK-128-65536-NEXT: ret
;
; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8:
; CHECK-512: # %bb.0: # %entry
; CHECK-512-NEXT: vsetivli zero, 16, e32, m1, ta, ma
; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
; CHECK-512-NEXT: ret
;
; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
; CHECK-64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i64 8)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9:
; CHECK-128-65536: # %bb.0: # %entry
; CHECK-128-65536-NEXT: vsetivli a2, 9, e64, m1, ta, ma
; CHECK-128-65536-NEXT: slli a2, a2, 1
; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1
; CHECK-128-65536-NEXT: ret
;
; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9:
; CHECK-512: # %bb.0: # %entry
; CHECK-512-NEXT: vsetivli a2, 9, e64, m1, ta, ma
; CHECK-512-NEXT: slli a2, a2, 1
; CHECK-512-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
; CHECK-512-NEXT: ret
;
; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
; CHECK-64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i64 9)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15:
; CHECK-128-65536: # %bb.0: # %entry
; CHECK-128-65536-NEXT: vsetivli a2, 15, e64, m1, ta, ma
; CHECK-128-65536-NEXT: slli a2, a2, 1
; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1
; CHECK-128-65536-NEXT: ret
;
; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15:
; CHECK-512: # %bb.0: # %entry
; CHECK-512-NEXT: vsetivli a2, 15, e64, m1, ta, ma
; CHECK-512-NEXT: slli a2, a2, 1
; CHECK-512-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
; CHECK-512-NEXT: ret
;
; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
; CHECK-64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i64 15)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16:
; CHECK-128-65536: # %bb.0: # %entry
; CHECK-128-65536-NEXT: vsetivli a2, 16, e64, m1, ta, ma
; CHECK-128-65536-NEXT: slli a2, a2, 1
; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1
; CHECK-128-65536-NEXT: ret
;
; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16:
; CHECK-512: # %bb.0: # %entry
; CHECK-512-NEXT: vsetivli zero, 16, e32, m1, ta, ma
; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
; CHECK-512-NEXT: ret
;
; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
; CHECK-64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i64 16)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047:
; CHECK-128-65536: # %bb.0: # %entry
; CHECK-128-65536-NEXT: li a2, 2047
; CHECK-128-65536-NEXT: vsetvli a2, a2, e64, m1, ta, ma
; CHECK-128-65536-NEXT: slli a2, a2, 1
; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1
; CHECK-128-65536-NEXT: ret
;
; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047:
; CHECK-512: # %bb.0: # %entry
; CHECK-512-NEXT: vsetivli zero, 16, e32, m1, ta, ma
; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
; CHECK-512-NEXT: ret
;
; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
; CHECK-64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i64 2047)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
; CHECK-128-65536: # %bb.0: # %entry
; CHECK-128-65536-NEXT: vsetvli a2, zero, e32, m1, ta, ma
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1
; CHECK-128-65536-NEXT: ret
;
; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
; CHECK-512: # %bb.0: # %entry
; CHECK-512-NEXT: vsetivli zero, 16, e32, m1, ta, ma
; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
; CHECK-512-NEXT: ret
;
; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
; CHECK-64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i64 2048)

  ret <vscale x 1 x i64> %a
}