; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s
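
; These tests cover lowering of llvm.experimental.vp.reverse for scalable i1
; (mask) vectors: the mask is promoted to an i8 0/1 vector with
; vmv.v.i/vmerge.vim, reverse indices are built with vid.v/vrsub.vx and applied
; via vrgatherei16.vv, and the result is converted back to a mask with vmsne.vi.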

define <vscale x 1 x i1> @test_vp_reverse_nxv1i1_masked(<vscale x 1 x i1> %src, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv1i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vid.v v10, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmsne.vi v0, v11, 0, v0.t
; CHECK-NEXT:    ret
  %dst = call <vscale x 1 x i1> @llvm.experimental.vp.reverse.nxv1i1(<vscale x 1 x i1> %src, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i1> %dst
}

define <vscale x 1 x i1> @test_vp_reverse_nxv1i1(<vscale x 1 x i1> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vx v8, v8, a1
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vrgatherei16.vv v10, v9, v8
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    ret

  %dst = call <vscale x 1 x i1> @llvm.experimental.vp.reverse.nxv1i1(<vscale x 1 x i1> %src, <vscale x 1 x i1> splat (i1 1), i32 %evl)
  ret <vscale x 1 x i1> %dst
}

define <vscale x 2 x i1> @test_vp_reverse_nxv2i1_masked(<vscale x 2 x i1> %src, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv2i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v10, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmsne.vi v0, v11, 0, v0.t
; CHECK-NEXT:    ret
  %dst = call <vscale x 2 x i1> @llvm.experimental.vp.reverse.nxv2i1(<vscale x 2 x i1> %src, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i1> %dst
}

define <vscale x 2 x i1> @test_vp_reverse_nxv2i1(<vscale x 2 x i1> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vx v8, v8, a1
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vrgatherei16.vv v10, v9, v8
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    ret

  %dst = call <vscale x 2 x i1> @llvm.experimental.vp.reverse.nxv2i1(<vscale x 2 x i1> %src, <vscale x 2 x i1> splat (i1 1), i32 %evl)
  ret <vscale x 2 x i1> %dst
}

define <vscale x 4 x i1> @test_vp_reverse_nxv4i1_masked(<vscale x 4 x i1> %src, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv4i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v10, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmsne.vi v0, v11, 0, v0.t
; CHECK-NEXT:    ret
  %dst = call <vscale x 4 x i1> @llvm.experimental.vp.reverse.nxv4i1(<vscale x 4 x i1> %src, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i1> %dst
}

define <vscale x 4 x i1> @test_vp_reverse_nxv4i1(<vscale x 4 x i1> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vx v8, v8, a1
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vrgatherei16.vv v10, v9, v8
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    ret

  %dst = call <vscale x 4 x i1> @llvm.experimental.vp.reverse.nxv4i1(<vscale x 4 x i1> %src, <vscale x 4 x i1> splat (i1 1), i32 %evl)
  ret <vscale x 4 x i1> %dst
}

define <vscale x 8 x i1> @test_vp_reverse_nxv8i1_masked(<vscale x 8 x i1> %src, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv8i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vid.v v10, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v12, v9, v10, v0.t
; CHECK-NEXT:    vmsne.vi v0, v12, 0, v0.t
; CHECK-NEXT:    ret
  %dst = call <vscale x 8 x i1> @llvm.experimental.vp.reverse.nxv8i1(<vscale x 8 x i1> %src, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i1> %dst
}

define <vscale x 8 x i1> @test_vp_reverse_nxv8i1(<vscale x 8 x i1> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vx v8, v8, a1
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmerge.vim v10, v10, 1, v0
; CHECK-NEXT:    vrgatherei16.vv v11, v10, v8
; CHECK-NEXT:    vmsne.vi v0, v11, 0
; CHECK-NEXT:    ret

  %dst = call <vscale x 8 x i1> @llvm.experimental.vp.reverse.nxv8i1(<vscale x 8 x i1> %src, <vscale x 8 x i1> splat (i1 1), i32 %evl)
  ret <vscale x 8 x i1> %dst
}

define <vscale x 16 x i1> @test_vp_reverse_nxv16i1_masked(<vscale x 16 x i1> %src, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv16i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmerge.vim v10, v10, 1, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vid.v v12, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v12, v12, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v16, v10, v12, v0.t
; CHECK-NEXT:    vmsne.vi v8, v16, 0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
  %dst = call <vscale x 16 x i1> @llvm.experimental.vp.reverse.nxv16i1(<vscale x 16 x i1> %src, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i1> %dst
}

define <vscale x 16 x i1> @test_vp_reverse_nxv16i1(<vscale x 16 x i1> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vx v8, v8, a1
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v12, 0
; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
; CHECK-NEXT:    vrgatherei16.vv v14, v12, v8
; CHECK-NEXT:    vmsne.vi v0, v14, 0
; CHECK-NEXT:    ret

  %dst = call <vscale x 16 x i1> @llvm.experimental.vp.reverse.nxv16i1(<vscale x 16 x i1> %src, <vscale x 16 x i1> splat (i1 1), i32 %evl)
  ret <vscale x 16 x i1> %dst
}

define <vscale x 32 x i1> @test_vp_reverse_nxv32i1_masked(<vscale x 32 x i1> %src, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv32i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v12, 0
; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT:    vid.v v16, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v16, v16, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v24, v12, v16, v0.t
; CHECK-NEXT:    vmsne.vi v8, v24, 0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
  %dst = call <vscale x 32 x i1> @llvm.experimental.vp.reverse.nxv32i1(<vscale x 32 x i1> %src, <vscale x 32 x i1> %mask, i32 %evl)
  ret <vscale x 32 x i1> %dst
}

define <vscale x 32 x i1> @test_vp_reverse_nxv32i1(<vscale x 32 x i1> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vx v8, v8, a1
; CHECK-NEXT:    vsetvli zero, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
; CHECK-NEXT:    vrgatherei16.vv v20, v16, v8
; CHECK-NEXT:    vmsne.vi v0, v20, 0
; CHECK-NEXT:    ret

  %dst = call <vscale x 32 x i1> @llvm.experimental.vp.reverse.nxv32i1(<vscale x 32 x i1> %src, <vscale x 32 x i1> splat (i1 1), i32 %evl)
  ret <vscale x 32 x i1> %dst
}
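
; For nxv64i1 the promoted i8 vector fills a full LMUL=8 register group, so the
; 16-bit gather indices cannot cover it in a single vrgatherei16. The checks
; below show the element vector being reversed at VLMAX granularity instead
; (two m4-half gathers with swapped destination halves), followed by a
; vslidedown.vx of VLMAX-EVL to line the reversed elements up with the
; requested EVL before converting back to a mask.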

define <vscale x 64 x i1> @test_vp_reverse_nxv64i1_masked(<vscale x 64 x i1> %src, <vscale x 64 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv64i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vmerge.vim v24, v16, 1, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a2, a1, 2
; CHECK-NEXT:    addi a2, a2, -1
; CHECK-NEXT:    vsetvli a3, zero, e16, m8, ta, ma
; CHECK-NEXT:    vid.v v16
; CHECK-NEXT:    vrsub.vx v0, v16, a2
; CHECK-NEXT:    vsetvli zero, zero, e8, m4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v20, v24, v0
; CHECK-NEXT:    vrgatherei16.vv v16, v28, v0
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    sub a1, a1, a0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vslidedown.vx v16, v16, a1, v0.t
; CHECK-NEXT:    vmsne.vi v8, v16, 0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
  %dst = call <vscale x 64 x i1> @llvm.experimental.vp.reverse.nxv64i1(<vscale x 64 x i1> %src, <vscale x 64 x i1> %mask, i32 %evl)
  ret <vscale x 64 x i1> %dst
}

define <vscale x 64 x i1> @test_vp_reverse_nxv64i1(<vscale x 64 x i1> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a2, a1, 2
; CHECK-NEXT:    addi a2, a2, -1
; CHECK-NEXT:    vsetvli a3, zero, e16, m8, ta, ma
; CHECK-NEXT:    vid.v v16
; CHECK-NEXT:    vrsub.vx v16, v16, a2
; CHECK-NEXT:    vsetvli zero, zero, e8, m4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v28, v8, v16
; CHECK-NEXT:    vrgatherei16.vv v24, v12, v16
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    sub a1, a1, a0
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v24, a1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret

  %dst = call <vscale x 64 x i1> @llvm.experimental.vp.reverse.nxv64i1(<vscale x 64 x i1> %src, <vscale x 64 x i1> splat (i1 1), i32 %evl)
  ret <vscale x 64 x i1> %dst
}

declare <vscale x 1 x i1> @llvm.experimental.vp.reverse.nxv1i1(<vscale x 1 x i1>,<vscale x 1 x i1>,i32)
declare <vscale x 2 x i1> @llvm.experimental.vp.reverse.nxv2i1(<vscale x 2 x i1>,<vscale x 2 x i1>,i32)
declare <vscale x 4 x i1> @llvm.experimental.vp.reverse.nxv4i1(<vscale x 4 x i1>,<vscale x 4 x i1>,i32)
declare <vscale x 8 x i1> @llvm.experimental.vp.reverse.nxv8i1(<vscale x 8 x i1>,<vscale x 8 x i1>,i32)
declare <vscale x 16 x i1> @llvm.experimental.vp.reverse.nxv16i1(<vscale x 16 x i1>,<vscale x 16 x i1>,i32)
declare <vscale x 32 x i1> @llvm.experimental.vp.reverse.nxv32i1(<vscale x 32 x i1>,<vscale x 32 x i1>,i32)
declare <vscale x 64 x i1> @llvm.experimental.vp.reverse.nxv64i1(<vscale x 64 x i1>,<vscale x 64 x i1>,i32)