1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-BITS-UNKNOWN
3 ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-BITS-256
4 ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-BITS-512
5 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-BITS-UNKNOWN
6 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-BITS-256
7 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-BITS-512
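;
; The three RUN lines per target differ only in the -riscv-v-vector-bits-max
; bound. Bounding VLEN at 256 or 512 lets the backend use vrgather.vv with
; byte-wide indices for the i8 tests (the 512-bit runs still split the
; LMUL=8 case in half so indices stay below 256), while the unbounded runs
; fall back to vrgatherei16.vv with 16-bit indices.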
10 ; VECTOR_REVERSE - masks
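; There is no vrgather for mask registers, so an i1 reverse is lowered by
; expanding the mask to 0/1 bytes (vmv.v.i + vmerge.vim), reversing those
; bytes with a gather, and comparing back to a mask (vand.vi + vmsne.vi).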
13 define <vscale x 2 x i1> @reverse_nxv2i1(<vscale x 2 x i1> %a) {
14 ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv2i1:
15 ; RV32-BITS-UNKNOWN: # %bb.0:
16 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
17 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0
18 ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0
19 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
20 ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 2
21 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
22 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
23 ; RV32-BITS-UNKNOWN-NEXT: vid.v v9
24 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0
25 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
26 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9
27 ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1
28 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
29 ; RV32-BITS-UNKNOWN-NEXT: ret
31 ; RV32-BITS-256-LABEL: reverse_nxv2i1:
32 ; RV32-BITS-256: # %bb.0:
33 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
34 ; RV32-BITS-256-NEXT: vmv.v.i v8, 0
35 ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
36 ; RV32-BITS-256-NEXT: csrr a0, vlenb
37 ; RV32-BITS-256-NEXT: srli a0, a0, 2
38 ; RV32-BITS-256-NEXT: addi a0, a0, -1
39 ; RV32-BITS-256-NEXT: vid.v v9
40 ; RV32-BITS-256-NEXT: vrsub.vx v9, v9, a0
41 ; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v9
42 ; RV32-BITS-256-NEXT: vand.vi v8, v10, 1
43 ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
44 ; RV32-BITS-256-NEXT: ret
46 ; RV32-BITS-512-LABEL: reverse_nxv2i1:
47 ; RV32-BITS-512: # %bb.0:
48 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
49 ; RV32-BITS-512-NEXT: vmv.v.i v8, 0
50 ; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0
51 ; RV32-BITS-512-NEXT: csrr a0, vlenb
52 ; RV32-BITS-512-NEXT: srli a0, a0, 2
53 ; RV32-BITS-512-NEXT: addi a0, a0, -1
54 ; RV32-BITS-512-NEXT: vid.v v9
55 ; RV32-BITS-512-NEXT: vrsub.vx v9, v9, a0
56 ; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v9
57 ; RV32-BITS-512-NEXT: vand.vi v8, v10, 1
58 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
59 ; RV32-BITS-512-NEXT: ret
61 ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv2i1:
62 ; RV64-BITS-UNKNOWN: # %bb.0:
63 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
64 ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0
65 ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0
66 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
67 ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 2
68 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
69 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
70 ; RV64-BITS-UNKNOWN-NEXT: vid.v v9
71 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0
72 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
73 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9
74 ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1
75 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
76 ; RV64-BITS-UNKNOWN-NEXT: ret
78 ; RV64-BITS-256-LABEL: reverse_nxv2i1:
79 ; RV64-BITS-256: # %bb.0:
80 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
81 ; RV64-BITS-256-NEXT: vmv.v.i v8, 0
82 ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
83 ; RV64-BITS-256-NEXT: csrr a0, vlenb
84 ; RV64-BITS-256-NEXT: srli a0, a0, 2
85 ; RV64-BITS-256-NEXT: addi a0, a0, -1
86 ; RV64-BITS-256-NEXT: vid.v v9
87 ; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0
88 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9
89 ; RV64-BITS-256-NEXT: vand.vi v8, v10, 1
90 ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
91 ; RV64-BITS-256-NEXT: ret
93 ; RV64-BITS-512-LABEL: reverse_nxv2i1:
94 ; RV64-BITS-512: # %bb.0:
95 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
96 ; RV64-BITS-512-NEXT: vmv.v.i v8, 0
97 ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0
98 ; RV64-BITS-512-NEXT: csrr a0, vlenb
99 ; RV64-BITS-512-NEXT: srli a0, a0, 2
100 ; RV64-BITS-512-NEXT: addi a0, a0, -1
101 ; RV64-BITS-512-NEXT: vid.v v9
102 ; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0
103 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9
104 ; RV64-BITS-512-NEXT: vand.vi v8, v10, 1
105 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
106 ; RV64-BITS-512-NEXT: ret
107 %res = call <vscale x 2 x i1> @llvm.experimental.vector.reverse.nxv2i1(<vscale x 2 x i1> %a)
108 ret <vscale x 2 x i1> %res
111 define <vscale x 4 x i1> @reverse_nxv4i1(<vscale x 4 x i1> %a) {
112 ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv4i1:
113 ; RV32-BITS-UNKNOWN: # %bb.0:
114 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
115 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0
116 ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0
117 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
118 ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 1
119 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
120 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
121 ; RV32-BITS-UNKNOWN-NEXT: vid.v v9
122 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0
123 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
124 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9
125 ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1
126 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
127 ; RV32-BITS-UNKNOWN-NEXT: ret
129 ; RV32-BITS-256-LABEL: reverse_nxv4i1:
130 ; RV32-BITS-256: # %bb.0:
131 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
132 ; RV32-BITS-256-NEXT: vmv.v.i v8, 0
133 ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
134 ; RV32-BITS-256-NEXT: csrr a0, vlenb
135 ; RV32-BITS-256-NEXT: srli a0, a0, 1
136 ; RV32-BITS-256-NEXT: addi a0, a0, -1
137 ; RV32-BITS-256-NEXT: vid.v v9
138 ; RV32-BITS-256-NEXT: vrsub.vx v9, v9, a0
139 ; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v9
140 ; RV32-BITS-256-NEXT: vand.vi v8, v10, 1
141 ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
142 ; RV32-BITS-256-NEXT: ret
144 ; RV32-BITS-512-LABEL: reverse_nxv4i1:
145 ; RV32-BITS-512: # %bb.0:
146 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
147 ; RV32-BITS-512-NEXT: vmv.v.i v8, 0
148 ; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0
149 ; RV32-BITS-512-NEXT: csrr a0, vlenb
150 ; RV32-BITS-512-NEXT: srli a0, a0, 1
151 ; RV32-BITS-512-NEXT: addi a0, a0, -1
152 ; RV32-BITS-512-NEXT: vid.v v9
153 ; RV32-BITS-512-NEXT: vrsub.vx v9, v9, a0
154 ; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v9
155 ; RV32-BITS-512-NEXT: vand.vi v8, v10, 1
156 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
157 ; RV32-BITS-512-NEXT: ret
159 ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv4i1:
160 ; RV64-BITS-UNKNOWN: # %bb.0:
161 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
162 ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0
163 ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0
164 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
165 ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 1
166 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
167 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
168 ; RV64-BITS-UNKNOWN-NEXT: vid.v v9
169 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0
170 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
171 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9
172 ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1
173 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
174 ; RV64-BITS-UNKNOWN-NEXT: ret
176 ; RV64-BITS-256-LABEL: reverse_nxv4i1:
177 ; RV64-BITS-256: # %bb.0:
178 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
179 ; RV64-BITS-256-NEXT: vmv.v.i v8, 0
180 ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
181 ; RV64-BITS-256-NEXT: csrr a0, vlenb
182 ; RV64-BITS-256-NEXT: srli a0, a0, 1
183 ; RV64-BITS-256-NEXT: addi a0, a0, -1
184 ; RV64-BITS-256-NEXT: vid.v v9
185 ; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0
186 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9
187 ; RV64-BITS-256-NEXT: vand.vi v8, v10, 1
188 ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
189 ; RV64-BITS-256-NEXT: ret
191 ; RV64-BITS-512-LABEL: reverse_nxv4i1:
192 ; RV64-BITS-512: # %bb.0:
193 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
194 ; RV64-BITS-512-NEXT: vmv.v.i v8, 0
195 ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0
196 ; RV64-BITS-512-NEXT: csrr a0, vlenb
197 ; RV64-BITS-512-NEXT: srli a0, a0, 1
198 ; RV64-BITS-512-NEXT: addi a0, a0, -1
199 ; RV64-BITS-512-NEXT: vid.v v9
200 ; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0
201 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9
202 ; RV64-BITS-512-NEXT: vand.vi v8, v10, 1
203 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
204 ; RV64-BITS-512-NEXT: ret
205 %res = call <vscale x 4 x i1> @llvm.experimental.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
206 ret <vscale x 4 x i1> %res
209 define <vscale x 8 x i1> @reverse_nxv8i1(<vscale x 8 x i1> %a) {
210 ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv8i1:
211 ; RV32-BITS-UNKNOWN: # %bb.0:
212 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma
213 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0
214 ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0
215 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
216 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
217 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
218 ; RV32-BITS-UNKNOWN-NEXT: vid.v v10
219 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0
220 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma
221 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10
222 ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v9, 1
223 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
224 ; RV32-BITS-UNKNOWN-NEXT: ret
226 ; RV32-BITS-256-LABEL: reverse_nxv8i1:
227 ; RV32-BITS-256: # %bb.0:
228 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma
229 ; RV32-BITS-256-NEXT: vmv.v.i v8, 0
230 ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
231 ; RV32-BITS-256-NEXT: csrr a0, vlenb
232 ; RV32-BITS-256-NEXT: addi a0, a0, -1
233 ; RV32-BITS-256-NEXT: vid.v v9
234 ; RV32-BITS-256-NEXT: vrsub.vx v9, v9, a0
235 ; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v9
236 ; RV32-BITS-256-NEXT: vand.vi v8, v10, 1
237 ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
238 ; RV32-BITS-256-NEXT: ret
240 ; RV32-BITS-512-LABEL: reverse_nxv8i1:
241 ; RV32-BITS-512: # %bb.0:
242 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma
243 ; RV32-BITS-512-NEXT: vmv.v.i v8, 0
244 ; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0
245 ; RV32-BITS-512-NEXT: csrr a0, vlenb
246 ; RV32-BITS-512-NEXT: addi a0, a0, -1
247 ; RV32-BITS-512-NEXT: vid.v v9
248 ; RV32-BITS-512-NEXT: vrsub.vx v9, v9, a0
249 ; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v9
250 ; RV32-BITS-512-NEXT: vand.vi v8, v10, 1
251 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
252 ; RV32-BITS-512-NEXT: ret
254 ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv8i1:
255 ; RV64-BITS-UNKNOWN: # %bb.0:
256 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma
257 ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0
258 ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0
259 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
260 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
261 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
262 ; RV64-BITS-UNKNOWN-NEXT: vid.v v10
263 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0
264 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma
265 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10
266 ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v9, 1
267 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
268 ; RV64-BITS-UNKNOWN-NEXT: ret
270 ; RV64-BITS-256-LABEL: reverse_nxv8i1:
271 ; RV64-BITS-256: # %bb.0:
272 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma
273 ; RV64-BITS-256-NEXT: vmv.v.i v8, 0
274 ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
275 ; RV64-BITS-256-NEXT: csrr a0, vlenb
276 ; RV64-BITS-256-NEXT: addi a0, a0, -1
277 ; RV64-BITS-256-NEXT: vid.v v9
278 ; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0
279 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9
280 ; RV64-BITS-256-NEXT: vand.vi v8, v10, 1
281 ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
282 ; RV64-BITS-256-NEXT: ret
284 ; RV64-BITS-512-LABEL: reverse_nxv8i1:
285 ; RV64-BITS-512: # %bb.0:
286 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma
287 ; RV64-BITS-512-NEXT: vmv.v.i v8, 0
288 ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0
289 ; RV64-BITS-512-NEXT: csrr a0, vlenb
290 ; RV64-BITS-512-NEXT: addi a0, a0, -1
291 ; RV64-BITS-512-NEXT: vid.v v9
292 ; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0
293 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9
294 ; RV64-BITS-512-NEXT: vand.vi v8, v10, 1
295 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
296 ; RV64-BITS-512-NEXT: ret
297 %res = call <vscale x 8 x i1> @llvm.experimental.vector.reverse.nxv8i1(<vscale x 8 x i1> %a)
298 ret <vscale x 8 x i1> %res
301 define <vscale x 16 x i1> @reverse_nxv16i1(<vscale x 16 x i1> %a) {
302 ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv16i1:
303 ; RV32-BITS-UNKNOWN: # %bb.0:
304 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m2, ta, ma
305 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0
306 ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0
307 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
308 ; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 1
309 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
310 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
311 ; RV32-BITS-UNKNOWN-NEXT: vid.v v12
312 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0
313 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, ma
314 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12
315 ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1
316 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
317 ; RV32-BITS-UNKNOWN-NEXT: ret
319 ; RV32-BITS-256-LABEL: reverse_nxv16i1:
320 ; RV32-BITS-256: # %bb.0:
321 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m2, ta, ma
322 ; RV32-BITS-256-NEXT: vmv.v.i v8, 0
323 ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
324 ; RV32-BITS-256-NEXT: csrr a0, vlenb
325 ; RV32-BITS-256-NEXT: slli a0, a0, 1
326 ; RV32-BITS-256-NEXT: addi a0, a0, -1
327 ; RV32-BITS-256-NEXT: vid.v v10
328 ; RV32-BITS-256-NEXT: vrsub.vx v10, v10, a0
329 ; RV32-BITS-256-NEXT: vrgather.vv v12, v8, v10
330 ; RV32-BITS-256-NEXT: vand.vi v8, v12, 1
331 ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
332 ; RV32-BITS-256-NEXT: ret
334 ; RV32-BITS-512-LABEL: reverse_nxv16i1:
335 ; RV32-BITS-512: # %bb.0:
336 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m2, ta, ma
337 ; RV32-BITS-512-NEXT: vmv.v.i v8, 0
338 ; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0
339 ; RV32-BITS-512-NEXT: csrr a0, vlenb
340 ; RV32-BITS-512-NEXT: slli a0, a0, 1
341 ; RV32-BITS-512-NEXT: addi a0, a0, -1
342 ; RV32-BITS-512-NEXT: vid.v v10
343 ; RV32-BITS-512-NEXT: vrsub.vx v10, v10, a0
344 ; RV32-BITS-512-NEXT: vrgather.vv v12, v8, v10
345 ; RV32-BITS-512-NEXT: vand.vi v8, v12, 1
346 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
347 ; RV32-BITS-512-NEXT: ret
349 ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv16i1:
350 ; RV64-BITS-UNKNOWN: # %bb.0:
351 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m2, ta, ma
352 ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0
353 ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0
354 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
355 ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 1
356 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
357 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
358 ; RV64-BITS-UNKNOWN-NEXT: vid.v v12
359 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0
360 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, ma
361 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12
362 ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1
363 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
364 ; RV64-BITS-UNKNOWN-NEXT: ret
366 ; RV64-BITS-256-LABEL: reverse_nxv16i1:
367 ; RV64-BITS-256: # %bb.0:
368 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m2, ta, ma
369 ; RV64-BITS-256-NEXT: vmv.v.i v8, 0
370 ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
371 ; RV64-BITS-256-NEXT: csrr a0, vlenb
372 ; RV64-BITS-256-NEXT: slli a0, a0, 1
373 ; RV64-BITS-256-NEXT: addi a0, a0, -1
374 ; RV64-BITS-256-NEXT: vid.v v10
375 ; RV64-BITS-256-NEXT: vrsub.vx v10, v10, a0
376 ; RV64-BITS-256-NEXT: vrgather.vv v12, v8, v10
377 ; RV64-BITS-256-NEXT: vand.vi v8, v12, 1
378 ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
379 ; RV64-BITS-256-NEXT: ret
381 ; RV64-BITS-512-LABEL: reverse_nxv16i1:
382 ; RV64-BITS-512: # %bb.0:
383 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m2, ta, ma
384 ; RV64-BITS-512-NEXT: vmv.v.i v8, 0
385 ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0
386 ; RV64-BITS-512-NEXT: csrr a0, vlenb
387 ; RV64-BITS-512-NEXT: slli a0, a0, 1
388 ; RV64-BITS-512-NEXT: addi a0, a0, -1
389 ; RV64-BITS-512-NEXT: vid.v v10
390 ; RV64-BITS-512-NEXT: vrsub.vx v10, v10, a0
391 ; RV64-BITS-512-NEXT: vrgather.vv v12, v8, v10
392 ; RV64-BITS-512-NEXT: vand.vi v8, v12, 1
393 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
394 ; RV64-BITS-512-NEXT: ret
395 %res = call <vscale x 16 x i1> @llvm.experimental.vector.reverse.nxv16i1(<vscale x 16 x i1> %a)
396 ret <vscale x 16 x i1> %res
399 define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
400 ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv32i1:
401 ; RV32-BITS-UNKNOWN: # %bb.0:
402 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
403 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0
404 ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0
405 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
406 ; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 2
407 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
408 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m8, ta, ma
409 ; RV32-BITS-UNKNOWN-NEXT: vid.v v16
410 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0
411 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, ma
412 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16
413 ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v12, 1
414 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
415 ; RV32-BITS-UNKNOWN-NEXT: ret
417 ; RV32-BITS-256-LABEL: reverse_nxv32i1:
418 ; RV32-BITS-256: # %bb.0:
419 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma
420 ; RV32-BITS-256-NEXT: vmv.v.i v8, 0
421 ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
422 ; RV32-BITS-256-NEXT: csrr a0, vlenb
423 ; RV32-BITS-256-NEXT: slli a0, a0, 2
424 ; RV32-BITS-256-NEXT: addi a0, a0, -1
425 ; RV32-BITS-256-NEXT: vid.v v12
426 ; RV32-BITS-256-NEXT: vrsub.vx v12, v12, a0
427 ; RV32-BITS-256-NEXT: vrgather.vv v16, v8, v12
428 ; RV32-BITS-256-NEXT: vand.vi v8, v16, 1
429 ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
430 ; RV32-BITS-256-NEXT: ret
432 ; RV32-BITS-512-LABEL: reverse_nxv32i1:
433 ; RV32-BITS-512: # %bb.0:
434 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma
435 ; RV32-BITS-512-NEXT: vmv.v.i v8, 0
436 ; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0
437 ; RV32-BITS-512-NEXT: csrr a0, vlenb
438 ; RV32-BITS-512-NEXT: slli a0, a0, 2
439 ; RV32-BITS-512-NEXT: addi a0, a0, -1
440 ; RV32-BITS-512-NEXT: vid.v v12
441 ; RV32-BITS-512-NEXT: vrsub.vx v12, v12, a0
442 ; RV32-BITS-512-NEXT: vrgather.vv v16, v8, v12
443 ; RV32-BITS-512-NEXT: vand.vi v8, v16, 1
444 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
445 ; RV32-BITS-512-NEXT: ret
447 ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv32i1:
448 ; RV64-BITS-UNKNOWN: # %bb.0:
449 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
450 ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0
451 ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0
452 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
453 ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2
454 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
455 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m8, ta, ma
456 ; RV64-BITS-UNKNOWN-NEXT: vid.v v16
457 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0
458 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, ma
459 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16
460 ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v12, 1
461 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
462 ; RV64-BITS-UNKNOWN-NEXT: ret
464 ; RV64-BITS-256-LABEL: reverse_nxv32i1:
465 ; RV64-BITS-256: # %bb.0:
466 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma
467 ; RV64-BITS-256-NEXT: vmv.v.i v8, 0
468 ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
469 ; RV64-BITS-256-NEXT: csrr a0, vlenb
470 ; RV64-BITS-256-NEXT: slli a0, a0, 2
471 ; RV64-BITS-256-NEXT: addi a0, a0, -1
472 ; RV64-BITS-256-NEXT: vid.v v12
473 ; RV64-BITS-256-NEXT: vrsub.vx v12, v12, a0
474 ; RV64-BITS-256-NEXT: vrgather.vv v16, v8, v12
475 ; RV64-BITS-256-NEXT: vand.vi v8, v16, 1
476 ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
477 ; RV64-BITS-256-NEXT: ret
479 ; RV64-BITS-512-LABEL: reverse_nxv32i1:
480 ; RV64-BITS-512: # %bb.0:
481 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma
482 ; RV64-BITS-512-NEXT: vmv.v.i v8, 0
483 ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0
484 ; RV64-BITS-512-NEXT: csrr a0, vlenb
485 ; RV64-BITS-512-NEXT: slli a0, a0, 2
486 ; RV64-BITS-512-NEXT: addi a0, a0, -1
487 ; RV64-BITS-512-NEXT: vid.v v12
488 ; RV64-BITS-512-NEXT: vrsub.vx v12, v12, a0
489 ; RV64-BITS-512-NEXT: vrgather.vv v16, v8, v12
490 ; RV64-BITS-512-NEXT: vand.vi v8, v16, 1
491 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
492 ; RV64-BITS-512-NEXT: ret
493 %res = call <vscale x 32 x i1> @llvm.experimental.vector.reverse.nxv32i1(<vscale x 32 x i1> %a)
494 ret <vscale x 32 x i1> %res
497 define <vscale x 64 x i1> @reverse_nxv64i1(<vscale x 64 x i1> %a) {
498 ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv64i1:
499 ; RV32-BITS-UNKNOWN: # %bb.0:
500 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
501 ; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 2
502 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
503 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
504 ; RV32-BITS-UNKNOWN-NEXT: vid.v v8
505 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v8, v8, a0
506 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma
507 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v16, 0
508 ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v16, v16, 1, v0
509 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
510 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v28, v16, v8
511 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v24, v20, v8
512 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma
513 ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v24, 1
514 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
515 ; RV32-BITS-UNKNOWN-NEXT: ret
517 ; RV32-BITS-256-LABEL: reverse_nxv64i1:
518 ; RV32-BITS-256: # %bb.0:
519 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m8, ta, ma
520 ; RV32-BITS-256-NEXT: vmv.v.i v8, 0
521 ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
522 ; RV32-BITS-256-NEXT: csrr a0, vlenb
523 ; RV32-BITS-256-NEXT: slli a0, a0, 3
524 ; RV32-BITS-256-NEXT: addi a0, a0, -1
525 ; RV32-BITS-256-NEXT: vid.v v16
526 ; RV32-BITS-256-NEXT: vrsub.vx v16, v16, a0
527 ; RV32-BITS-256-NEXT: vrgather.vv v24, v8, v16
528 ; RV32-BITS-256-NEXT: vand.vi v8, v24, 1
529 ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
530 ; RV32-BITS-256-NEXT: ret
532 ; RV32-BITS-512-LABEL: reverse_nxv64i1:
533 ; RV32-BITS-512: # %bb.0:
534 ; RV32-BITS-512-NEXT: csrr a0, vlenb
535 ; RV32-BITS-512-NEXT: slli a0, a0, 2
536 ; RV32-BITS-512-NEXT: addi a0, a0, -1
537 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma
538 ; RV32-BITS-512-NEXT: vid.v v8
539 ; RV32-BITS-512-NEXT: vrsub.vx v8, v8, a0
540 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma
541 ; RV32-BITS-512-NEXT: vmv.v.i v16, 0
542 ; RV32-BITS-512-NEXT: vmerge.vim v16, v16, 1, v0
543 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma
544 ; RV32-BITS-512-NEXT: vrgather.vv v28, v16, v8
545 ; RV32-BITS-512-NEXT: vrgather.vv v24, v20, v8
546 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma
547 ; RV32-BITS-512-NEXT: vand.vi v8, v24, 1
548 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
549 ; RV32-BITS-512-NEXT: ret
551 ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv64i1:
552 ; RV64-BITS-UNKNOWN: # %bb.0:
553 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
554 ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2
555 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
556 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
557 ; RV64-BITS-UNKNOWN-NEXT: vid.v v8
558 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v8, v8, a0
559 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma
560 ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v16, 0
561 ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v16, v16, 1, v0
562 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
563 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v28, v16, v8
564 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v24, v20, v8
565 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma
566 ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v24, 1
567 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
568 ; RV64-BITS-UNKNOWN-NEXT: ret
570 ; RV64-BITS-256-LABEL: reverse_nxv64i1:
571 ; RV64-BITS-256: # %bb.0:
572 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m8, ta, ma
573 ; RV64-BITS-256-NEXT: vmv.v.i v8, 0
574 ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
575 ; RV64-BITS-256-NEXT: csrr a0, vlenb
576 ; RV64-BITS-256-NEXT: slli a0, a0, 3
577 ; RV64-BITS-256-NEXT: addi a0, a0, -1
578 ; RV64-BITS-256-NEXT: vid.v v16
579 ; RV64-BITS-256-NEXT: vrsub.vx v16, v16, a0
580 ; RV64-BITS-256-NEXT: vrgather.vv v24, v8, v16
581 ; RV64-BITS-256-NEXT: vand.vi v8, v24, 1
582 ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
583 ; RV64-BITS-256-NEXT: ret
585 ; RV64-BITS-512-LABEL: reverse_nxv64i1:
586 ; RV64-BITS-512: # %bb.0:
587 ; RV64-BITS-512-NEXT: csrr a0, vlenb
588 ; RV64-BITS-512-NEXT: slli a0, a0, 2
589 ; RV64-BITS-512-NEXT: addi a0, a0, -1
590 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma
591 ; RV64-BITS-512-NEXT: vid.v v8
592 ; RV64-BITS-512-NEXT: vrsub.vx v8, v8, a0
593 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma
594 ; RV64-BITS-512-NEXT: vmv.v.i v16, 0
595 ; RV64-BITS-512-NEXT: vmerge.vim v16, v16, 1, v0
596 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma
597 ; RV64-BITS-512-NEXT: vrgather.vv v28, v16, v8
598 ; RV64-BITS-512-NEXT: vrgather.vv v24, v20, v8
599 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma
600 ; RV64-BITS-512-NEXT: vand.vi v8, v24, 1
601 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
602 ; RV64-BITS-512-NEXT: ret
603 %res = call <vscale x 64 x i1> @llvm.experimental.vector.reverse.nxv64i1(<vscale x 64 x i1> %a)
604 ret <vscale x 64 x i1> %res
608 ; VECTOR_REVERSE - integer
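; A reverse is lowered to an index-vector gather: vid.v produces 0..VL-1,
; vrsub.vx flips it to VL-1..0 (with VL-1 derived from vlenb), and
; vrgather.vv (or vrgatherei16.vv when 8-bit indices may not cover VLMAX)
; permutes the source into the reversed order.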
611 define <vscale x 1 x i8> @reverse_nxv1i8(<vscale x 1 x i8> %a) {
612 ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv1i8:
613 ; RV32-BITS-UNKNOWN: # %bb.0:
614 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
615 ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 3
616 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
617 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
618 ; RV32-BITS-UNKNOWN-NEXT: vid.v v9
619 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0
620 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
621 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10
622 ; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9
623 ; RV32-BITS-UNKNOWN-NEXT: ret
625 ; RV32-BITS-256-LABEL: reverse_nxv1i8:
626 ; RV32-BITS-256: # %bb.0:
627 ; RV32-BITS-256-NEXT: csrr a0, vlenb
628 ; RV32-BITS-256-NEXT: srli a0, a0, 3
629 ; RV32-BITS-256-NEXT: addi a0, a0, -1
630 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
631 ; RV32-BITS-256-NEXT: vid.v v9
632 ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0
633 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10
634 ; RV32-BITS-256-NEXT: vmv1r.v v8, v9
635 ; RV32-BITS-256-NEXT: ret
637 ; RV32-BITS-512-LABEL: reverse_nxv1i8:
638 ; RV32-BITS-512: # %bb.0:
639 ; RV32-BITS-512-NEXT: csrr a0, vlenb
640 ; RV32-BITS-512-NEXT: srli a0, a0, 3
641 ; RV32-BITS-512-NEXT: addi a0, a0, -1
642 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
643 ; RV32-BITS-512-NEXT: vid.v v9
644 ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0
645 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10
646 ; RV32-BITS-512-NEXT: vmv1r.v v8, v9
647 ; RV32-BITS-512-NEXT: ret
649 ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv1i8:
650 ; RV64-BITS-UNKNOWN: # %bb.0:
651 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
652 ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 3
653 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
654 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
655 ; RV64-BITS-UNKNOWN-NEXT: vid.v v9
656 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0
657 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
658 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10
659 ; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9
660 ; RV64-BITS-UNKNOWN-NEXT: ret
662 ; RV64-BITS-256-LABEL: reverse_nxv1i8:
663 ; RV64-BITS-256: # %bb.0:
664 ; RV64-BITS-256-NEXT: csrr a0, vlenb
665 ; RV64-BITS-256-NEXT: srli a0, a0, 3
666 ; RV64-BITS-256-NEXT: addi a0, a0, -1
667 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
668 ; RV64-BITS-256-NEXT: vid.v v9
669 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0
670 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10
671 ; RV64-BITS-256-NEXT: vmv1r.v v8, v9
672 ; RV64-BITS-256-NEXT: ret
674 ; RV64-BITS-512-LABEL: reverse_nxv1i8:
675 ; RV64-BITS-512: # %bb.0:
676 ; RV64-BITS-512-NEXT: csrr a0, vlenb
677 ; RV64-BITS-512-NEXT: srli a0, a0, 3
678 ; RV64-BITS-512-NEXT: addi a0, a0, -1
679 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
680 ; RV64-BITS-512-NEXT: vid.v v9
681 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0
682 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10
683 ; RV64-BITS-512-NEXT: vmv1r.v v8, v9
684 ; RV64-BITS-512-NEXT: ret
685 %res = call <vscale x 1 x i8> @llvm.experimental.vector.reverse.nxv1i8(<vscale x 1 x i8> %a)
686 ret <vscale x 1 x i8> %res
689 define <vscale x 2 x i8> @reverse_nxv2i8(<vscale x 2 x i8> %a) {
690 ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv2i8:
691 ; RV32-BITS-UNKNOWN: # %bb.0:
692 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
693 ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 2
694 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
695 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
696 ; RV32-BITS-UNKNOWN-NEXT: vid.v v9
697 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0
698 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
699 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10
700 ; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9
701 ; RV32-BITS-UNKNOWN-NEXT: ret
703 ; RV32-BITS-256-LABEL: reverse_nxv2i8:
704 ; RV32-BITS-256: # %bb.0:
705 ; RV32-BITS-256-NEXT: csrr a0, vlenb
706 ; RV32-BITS-256-NEXT: srli a0, a0, 2
707 ; RV32-BITS-256-NEXT: addi a0, a0, -1
708 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
709 ; RV32-BITS-256-NEXT: vid.v v9
710 ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0
711 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10
712 ; RV32-BITS-256-NEXT: vmv1r.v v8, v9
713 ; RV32-BITS-256-NEXT: ret
715 ; RV32-BITS-512-LABEL: reverse_nxv2i8:
716 ; RV32-BITS-512: # %bb.0:
717 ; RV32-BITS-512-NEXT: csrr a0, vlenb
718 ; RV32-BITS-512-NEXT: srli a0, a0, 2
719 ; RV32-BITS-512-NEXT: addi a0, a0, -1
720 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
721 ; RV32-BITS-512-NEXT: vid.v v9
722 ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0
723 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10
724 ; RV32-BITS-512-NEXT: vmv1r.v v8, v9
725 ; RV32-BITS-512-NEXT: ret
727 ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv2i8:
728 ; RV64-BITS-UNKNOWN: # %bb.0:
729 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
730 ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 2
731 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
732 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
733 ; RV64-BITS-UNKNOWN-NEXT: vid.v v9
734 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0
735 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
736 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10
737 ; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9
738 ; RV64-BITS-UNKNOWN-NEXT: ret
740 ; RV64-BITS-256-LABEL: reverse_nxv2i8:
741 ; RV64-BITS-256: # %bb.0:
742 ; RV64-BITS-256-NEXT: csrr a0, vlenb
743 ; RV64-BITS-256-NEXT: srli a0, a0, 2
744 ; RV64-BITS-256-NEXT: addi a0, a0, -1
745 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
746 ; RV64-BITS-256-NEXT: vid.v v9
747 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0
748 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10
749 ; RV64-BITS-256-NEXT: vmv1r.v v8, v9
750 ; RV64-BITS-256-NEXT: ret
752 ; RV64-BITS-512-LABEL: reverse_nxv2i8:
753 ; RV64-BITS-512: # %bb.0:
754 ; RV64-BITS-512-NEXT: csrr a0, vlenb
755 ; RV64-BITS-512-NEXT: srli a0, a0, 2
756 ; RV64-BITS-512-NEXT: addi a0, a0, -1
757 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
758 ; RV64-BITS-512-NEXT: vid.v v9
759 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0
760 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10
761 ; RV64-BITS-512-NEXT: vmv1r.v v8, v9
762 ; RV64-BITS-512-NEXT: ret
763 %res = call <vscale x 2 x i8> @llvm.experimental.vector.reverse.nxv2i8(<vscale x 2 x i8> %a)
764 ret <vscale x 2 x i8> %res
767 define <vscale x 4 x i8> @reverse_nxv4i8(<vscale x 4 x i8> %a) {
768 ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv4i8:
769 ; RV32-BITS-UNKNOWN: # %bb.0:
770 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
771 ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 1
772 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
773 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
774 ; RV32-BITS-UNKNOWN-NEXT: vid.v v9
775 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0
776 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
777 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10
778 ; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9
779 ; RV32-BITS-UNKNOWN-NEXT: ret
781 ; RV32-BITS-256-LABEL: reverse_nxv4i8:
782 ; RV32-BITS-256: # %bb.0:
783 ; RV32-BITS-256-NEXT: csrr a0, vlenb
784 ; RV32-BITS-256-NEXT: srli a0, a0, 1
785 ; RV32-BITS-256-NEXT: addi a0, a0, -1
786 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
787 ; RV32-BITS-256-NEXT: vid.v v9
788 ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0
789 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10
790 ; RV32-BITS-256-NEXT: vmv1r.v v8, v9
791 ; RV32-BITS-256-NEXT: ret
793 ; RV32-BITS-512-LABEL: reverse_nxv4i8:
794 ; RV32-BITS-512: # %bb.0:
795 ; RV32-BITS-512-NEXT: csrr a0, vlenb
796 ; RV32-BITS-512-NEXT: srli a0, a0, 1
797 ; RV32-BITS-512-NEXT: addi a0, a0, -1
798 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
799 ; RV32-BITS-512-NEXT: vid.v v9
800 ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0
801 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10
802 ; RV32-BITS-512-NEXT: vmv1r.v v8, v9
803 ; RV32-BITS-512-NEXT: ret
805 ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv4i8:
806 ; RV64-BITS-UNKNOWN: # %bb.0:
807 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
808 ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 1
809 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
810 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
811 ; RV64-BITS-UNKNOWN-NEXT: vid.v v9
812 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0
813 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
814 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10
815 ; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9
816 ; RV64-BITS-UNKNOWN-NEXT: ret
818 ; RV64-BITS-256-LABEL: reverse_nxv4i8:
819 ; RV64-BITS-256: # %bb.0:
820 ; RV64-BITS-256-NEXT: csrr a0, vlenb
821 ; RV64-BITS-256-NEXT: srli a0, a0, 1
822 ; RV64-BITS-256-NEXT: addi a0, a0, -1
823 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
824 ; RV64-BITS-256-NEXT: vid.v v9
825 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0
826 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10
827 ; RV64-BITS-256-NEXT: vmv1r.v v8, v9
828 ; RV64-BITS-256-NEXT: ret
830 ; RV64-BITS-512-LABEL: reverse_nxv4i8:
831 ; RV64-BITS-512: # %bb.0:
832 ; RV64-BITS-512-NEXT: csrr a0, vlenb
833 ; RV64-BITS-512-NEXT: srli a0, a0, 1
834 ; RV64-BITS-512-NEXT: addi a0, a0, -1
835 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
836 ; RV64-BITS-512-NEXT: vid.v v9
837 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0
838 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10
839 ; RV64-BITS-512-NEXT: vmv1r.v v8, v9
840 ; RV64-BITS-512-NEXT: ret
841 %res = call <vscale x 4 x i8> @llvm.experimental.vector.reverse.nxv4i8(<vscale x 4 x i8> %a)
842 ret <vscale x 4 x i8> %res
845 define <vscale x 8 x i8> @reverse_nxv8i8(<vscale x 8 x i8> %a) {
846 ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv8i8:
847 ; RV32-BITS-UNKNOWN: # %bb.0:
848 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
849 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
850 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
851 ; RV32-BITS-UNKNOWN-NEXT: vid.v v10
852 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0
853 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma
854 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10
855 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v9
856 ; RV32-BITS-UNKNOWN-NEXT: ret
858 ; RV32-BITS-256-LABEL: reverse_nxv8i8:
859 ; RV32-BITS-256: # %bb.0:
860 ; RV32-BITS-256-NEXT: csrr a0, vlenb
861 ; RV32-BITS-256-NEXT: addi a0, a0, -1
862 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma
863 ; RV32-BITS-256-NEXT: vid.v v9
864 ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0
865 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10
866 ; RV32-BITS-256-NEXT: vmv.v.v v8, v9
867 ; RV32-BITS-256-NEXT: ret
869 ; RV32-BITS-512-LABEL: reverse_nxv8i8:
870 ; RV32-BITS-512: # %bb.0:
871 ; RV32-BITS-512-NEXT: csrr a0, vlenb
872 ; RV32-BITS-512-NEXT: addi a0, a0, -1
873 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma
874 ; RV32-BITS-512-NEXT: vid.v v9
875 ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0
876 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10
877 ; RV32-BITS-512-NEXT: vmv.v.v v8, v9
878 ; RV32-BITS-512-NEXT: ret
880 ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv8i8:
881 ; RV64-BITS-UNKNOWN: # %bb.0:
882 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
883 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
884 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
885 ; RV64-BITS-UNKNOWN-NEXT: vid.v v10
886 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0
887 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma
888 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10
889 ; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v9
890 ; RV64-BITS-UNKNOWN-NEXT: ret
892 ; RV64-BITS-256-LABEL: reverse_nxv8i8:
893 ; RV64-BITS-256: # %bb.0:
894 ; RV64-BITS-256-NEXT: csrr a0, vlenb
895 ; RV64-BITS-256-NEXT: addi a0, a0, -1
896 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma
897 ; RV64-BITS-256-NEXT: vid.v v9
898 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0
899 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10
900 ; RV64-BITS-256-NEXT: vmv.v.v v8, v9
901 ; RV64-BITS-256-NEXT: ret
903 ; RV64-BITS-512-LABEL: reverse_nxv8i8:
904 ; RV64-BITS-512: # %bb.0:
905 ; RV64-BITS-512-NEXT: csrr a0, vlenb
906 ; RV64-BITS-512-NEXT: addi a0, a0, -1
907 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma
908 ; RV64-BITS-512-NEXT: vid.v v9
909 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0
910 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10
911 ; RV64-BITS-512-NEXT: vmv.v.v v8, v9
912 ; RV64-BITS-512-NEXT: ret
913 %res = call <vscale x 8 x i8> @llvm.experimental.vector.reverse.nxv8i8(<vscale x 8 x i8> %a)
914 ret <vscale x 8 x i8> %res
917 define <vscale x 16 x i8> @reverse_nxv16i8(<vscale x 16 x i8> %a) {
918 ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv16i8:
919 ; RV32-BITS-UNKNOWN: # %bb.0:
920 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
921 ; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 1
922 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
923 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
924 ; RV32-BITS-UNKNOWN-NEXT: vid.v v12
925 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0
926 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, ma
927 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12
928 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v10
929 ; RV32-BITS-UNKNOWN-NEXT: ret
931 ; RV32-BITS-256-LABEL: reverse_nxv16i8:
932 ; RV32-BITS-256: # %bb.0:
933 ; RV32-BITS-256-NEXT: csrr a0, vlenb
934 ; RV32-BITS-256-NEXT: slli a0, a0, 1
935 ; RV32-BITS-256-NEXT: addi a0, a0, -1
936 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m2, ta, ma
937 ; RV32-BITS-256-NEXT: vid.v v10
938 ; RV32-BITS-256-NEXT: vrsub.vx v12, v10, a0
939 ; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v12
940 ; RV32-BITS-256-NEXT: vmv.v.v v8, v10
941 ; RV32-BITS-256-NEXT: ret
943 ; RV32-BITS-512-LABEL: reverse_nxv16i8:
944 ; RV32-BITS-512: # %bb.0:
945 ; RV32-BITS-512-NEXT: csrr a0, vlenb
946 ; RV32-BITS-512-NEXT: slli a0, a0, 1
947 ; RV32-BITS-512-NEXT: addi a0, a0, -1
948 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m2, ta, ma
949 ; RV32-BITS-512-NEXT: vid.v v10
950 ; RV32-BITS-512-NEXT: vrsub.vx v12, v10, a0
951 ; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v12
952 ; RV32-BITS-512-NEXT: vmv.v.v v8, v10
953 ; RV32-BITS-512-NEXT: ret
955 ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv16i8:
956 ; RV64-BITS-UNKNOWN: # %bb.0:
957 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
958 ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 1
959 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
960 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
961 ; RV64-BITS-UNKNOWN-NEXT: vid.v v12
962 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0
963 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, ma
964 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12
965 ; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v10
966 ; RV64-BITS-UNKNOWN-NEXT: ret
968 ; RV64-BITS-256-LABEL: reverse_nxv16i8:
969 ; RV64-BITS-256: # %bb.0:
970 ; RV64-BITS-256-NEXT: csrr a0, vlenb
971 ; RV64-BITS-256-NEXT: slli a0, a0, 1
972 ; RV64-BITS-256-NEXT: addi a0, a0, -1
973 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m2, ta, ma
974 ; RV64-BITS-256-NEXT: vid.v v10
975 ; RV64-BITS-256-NEXT: vrsub.vx v12, v10, a0
976 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v12
977 ; RV64-BITS-256-NEXT: vmv.v.v v8, v10
978 ; RV64-BITS-256-NEXT: ret
980 ; RV64-BITS-512-LABEL: reverse_nxv16i8:
981 ; RV64-BITS-512: # %bb.0:
982 ; RV64-BITS-512-NEXT: csrr a0, vlenb
983 ; RV64-BITS-512-NEXT: slli a0, a0, 1
984 ; RV64-BITS-512-NEXT: addi a0, a0, -1
985 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m2, ta, ma
986 ; RV64-BITS-512-NEXT: vid.v v10
987 ; RV64-BITS-512-NEXT: vrsub.vx v12, v10, a0
988 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v12
989 ; RV64-BITS-512-NEXT: vmv.v.v v8, v10
990 ; RV64-BITS-512-NEXT: ret
991 %res = call <vscale x 16 x i8> @llvm.experimental.vector.reverse.nxv16i8(<vscale x 16 x i8> %a)
992 ret <vscale x 16 x i8> %res
995 define <vscale x 32 x i8> @reverse_nxv32i8(<vscale x 32 x i8> %a) {
996 ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv32i8:
997 ; RV32-BITS-UNKNOWN: # %bb.0:
998 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
999 ; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 2
1000 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
1001 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
1002 ; RV32-BITS-UNKNOWN-NEXT: vid.v v16
1003 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0
1004 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, ma
1005 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16
1006 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v12
1007 ; RV32-BITS-UNKNOWN-NEXT: ret
1009 ; RV32-BITS-256-LABEL: reverse_nxv32i8:
1010 ; RV32-BITS-256: # %bb.0:
1011 ; RV32-BITS-256-NEXT: csrr a0, vlenb
1012 ; RV32-BITS-256-NEXT: slli a0, a0, 2
1013 ; RV32-BITS-256-NEXT: addi a0, a0, -1
1014 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m4, ta, ma
1015 ; RV32-BITS-256-NEXT: vid.v v12
1016 ; RV32-BITS-256-NEXT: vrsub.vx v16, v12, a0
1017 ; RV32-BITS-256-NEXT: vrgather.vv v12, v8, v16
1018 ; RV32-BITS-256-NEXT: vmv.v.v v8, v12
1019 ; RV32-BITS-256-NEXT: ret
1021 ; RV32-BITS-512-LABEL: reverse_nxv32i8:
1022 ; RV32-BITS-512: # %bb.0:
1023 ; RV32-BITS-512-NEXT: csrr a0, vlenb
1024 ; RV32-BITS-512-NEXT: slli a0, a0, 2
1025 ; RV32-BITS-512-NEXT: addi a0, a0, -1
1026 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma
1027 ; RV32-BITS-512-NEXT: vid.v v12
1028 ; RV32-BITS-512-NEXT: vrsub.vx v16, v12, a0
1029 ; RV32-BITS-512-NEXT: vrgather.vv v12, v8, v16
1030 ; RV32-BITS-512-NEXT: vmv.v.v v8, v12
1031 ; RV32-BITS-512-NEXT: ret
1033 ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv32i8:
1034 ; RV64-BITS-UNKNOWN: # %bb.0:
1035 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
1036 ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2
1037 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
1038 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
1039 ; RV64-BITS-UNKNOWN-NEXT: vid.v v16
1040 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0
1041 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, ma
1042 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16
1043 ; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v12
1044 ; RV64-BITS-UNKNOWN-NEXT: ret
1046 ; RV64-BITS-256-LABEL: reverse_nxv32i8:
1047 ; RV64-BITS-256: # %bb.0:
1048 ; RV64-BITS-256-NEXT: csrr a0, vlenb
1049 ; RV64-BITS-256-NEXT: slli a0, a0, 2
1050 ; RV64-BITS-256-NEXT: addi a0, a0, -1
1051 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m4, ta, ma
1052 ; RV64-BITS-256-NEXT: vid.v v12
1053 ; RV64-BITS-256-NEXT: vrsub.vx v16, v12, a0
1054 ; RV64-BITS-256-NEXT: vrgather.vv v12, v8, v16
1055 ; RV64-BITS-256-NEXT: vmv.v.v v8, v12
1056 ; RV64-BITS-256-NEXT: ret
1058 ; RV64-BITS-512-LABEL: reverse_nxv32i8:
1059 ; RV64-BITS-512: # %bb.0:
1060 ; RV64-BITS-512-NEXT: csrr a0, vlenb
1061 ; RV64-BITS-512-NEXT: slli a0, a0, 2
1062 ; RV64-BITS-512-NEXT: addi a0, a0, -1
1063 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma
1064 ; RV64-BITS-512-NEXT: vid.v v12
1065 ; RV64-BITS-512-NEXT: vrsub.vx v16, v12, a0
1066 ; RV64-BITS-512-NEXT: vrgather.vv v12, v8, v16
1067 ; RV64-BITS-512-NEXT: vmv.v.v v8, v12
1068 ; RV64-BITS-512-NEXT: ret
1069 %res = call <vscale x 32 x i8> @llvm.experimental.vector.reverse.nxv32i8(<vscale x 32 x i8> %a)
1070 ret <vscale x 32 x i8> %res
1073 define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
1074 ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv64i8:
1075 ; RV32-BITS-UNKNOWN: # %bb.0:
1076 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
1077 ; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 2
1078 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
1079 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
1080 ; RV32-BITS-UNKNOWN-NEXT: vid.v v16
1081 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v24, v16, a0
1082 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, ma
1083 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v20, v8, v24
1084 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v16, v12, v24
1085 ; RV32-BITS-UNKNOWN-NEXT: vmv8r.v v8, v16
1086 ; RV32-BITS-UNKNOWN-NEXT: ret
1088 ; RV32-BITS-256-LABEL: reverse_nxv64i8:
1089 ; RV32-BITS-256: # %bb.0:
1090 ; RV32-BITS-256-NEXT: csrr a0, vlenb
1091 ; RV32-BITS-256-NEXT: slli a0, a0, 3
1092 ; RV32-BITS-256-NEXT: addi a0, a0, -1
1093 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m8, ta, ma
1094 ; RV32-BITS-256-NEXT: vid.v v16
1095 ; RV32-BITS-256-NEXT: vrsub.vx v24, v16, a0
1096 ; RV32-BITS-256-NEXT: vrgather.vv v16, v8, v24
1097 ; RV32-BITS-256-NEXT: vmv.v.v v8, v16
1098 ; RV32-BITS-256-NEXT: ret
1100 ; RV32-BITS-512-LABEL: reverse_nxv64i8:
1101 ; RV32-BITS-512: # %bb.0:
1102 ; RV32-BITS-512-NEXT: csrr a0, vlenb
1103 ; RV32-BITS-512-NEXT: slli a0, a0, 2
1104 ; RV32-BITS-512-NEXT: addi a0, a0, -1
1105 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma
1106 ; RV32-BITS-512-NEXT: vid.v v16
1107 ; RV32-BITS-512-NEXT: vrsub.vx v24, v16, a0
1108 ; RV32-BITS-512-NEXT: vrgather.vv v20, v8, v24
1109 ; RV32-BITS-512-NEXT: vrgather.vv v16, v12, v24
1110 ; RV32-BITS-512-NEXT: vmv8r.v v8, v16
1111 ; RV32-BITS-512-NEXT: ret
1113 ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv64i8:
1114 ; RV64-BITS-UNKNOWN: # %bb.0:
1115 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
1116 ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2
1117 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
1118 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
1119 ; RV64-BITS-UNKNOWN-NEXT: vid.v v16
1120 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v24, v16, a0
1121 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, ma
1122 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v20, v8, v24
1123 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v16, v12, v24
1124 ; RV64-BITS-UNKNOWN-NEXT: vmv8r.v v8, v16
1125 ; RV64-BITS-UNKNOWN-NEXT: ret
1127 ; RV64-BITS-256-LABEL: reverse_nxv64i8:
1128 ; RV64-BITS-256: # %bb.0:
1129 ; RV64-BITS-256-NEXT: csrr a0, vlenb
1130 ; RV64-BITS-256-NEXT: slli a0, a0, 3
1131 ; RV64-BITS-256-NEXT: addi a0, a0, -1
1132 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m8, ta, ma
1133 ; RV64-BITS-256-NEXT: vid.v v16
1134 ; RV64-BITS-256-NEXT: vrsub.vx v24, v16, a0
1135 ; RV64-BITS-256-NEXT: vrgather.vv v16, v8, v24
1136 ; RV64-BITS-256-NEXT: vmv.v.v v8, v16
1137 ; RV64-BITS-256-NEXT: ret
1139 ; RV64-BITS-512-LABEL: reverse_nxv64i8:
1140 ; RV64-BITS-512: # %bb.0:
1141 ; RV64-BITS-512-NEXT: csrr a0, vlenb
1142 ; RV64-BITS-512-NEXT: slli a0, a0, 2
1143 ; RV64-BITS-512-NEXT: addi a0, a0, -1
1144 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma
1145 ; RV64-BITS-512-NEXT: vid.v v16
1146 ; RV64-BITS-512-NEXT: vrsub.vx v24, v16, a0
1147 ; RV64-BITS-512-NEXT: vrgather.vv v20, v8, v24
1148 ; RV64-BITS-512-NEXT: vrgather.vv v16, v12, v24
1149 ; RV64-BITS-512-NEXT: vmv8r.v v8, v16
1150 ; RV64-BITS-512-NEXT: ret
1151 %res = call <vscale x 64 x i8> @llvm.experimental.vector.reverse.nxv64i8(<vscale x 64 x i8> %a)
1152 ret <vscale x 64 x i8> %res
1155 define <vscale x 1 x i16> @reverse_nxv1i16(<vscale x 1 x i16> %a) {
1156 ; CHECK-LABEL: reverse_nxv1i16:
1158 ; CHECK-NEXT: csrr a0, vlenb
1159 ; CHECK-NEXT: srli a0, a0, 3
1160 ; CHECK-NEXT: addi a0, a0, -1
1161 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
1162 ; CHECK-NEXT: vid.v v9
1163 ; CHECK-NEXT: vrsub.vx v10, v9, a0
1164 ; CHECK-NEXT: vrgather.vv v9, v8, v10
1165 ; CHECK-NEXT: vmv1r.v v8, v9
1167 %res = call <vscale x 1 x i16> @llvm.experimental.vector.reverse.nxv1i16(<vscale x 1 x i16> %a)
1168 ret <vscale x 1 x i16> %res
1171 define <vscale x 2 x i16> @reverse_nxv2i16(<vscale x 2 x i16> %a) {
1172 ; CHECK-LABEL: reverse_nxv2i16:
1174 ; CHECK-NEXT: csrr a0, vlenb
1175 ; CHECK-NEXT: srli a0, a0, 2
1176 ; CHECK-NEXT: addi a0, a0, -1
1177 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
1178 ; CHECK-NEXT: vid.v v9
1179 ; CHECK-NEXT: vrsub.vx v10, v9, a0
1180 ; CHECK-NEXT: vrgather.vv v9, v8, v10
1181 ; CHECK-NEXT: vmv1r.v v8, v9
1183 %res = call <vscale x 2 x i16> @llvm.experimental.vector.reverse.nxv2i16(<vscale x 2 x i16> %a)
1184 ret <vscale x 2 x i16> %res
1187 define <vscale x 4 x i16> @reverse_nxv4i16(<vscale x 4 x i16> %a) {
1188 ; CHECK-LABEL: reverse_nxv4i16:
1190 ; CHECK-NEXT: csrr a0, vlenb
1191 ; CHECK-NEXT: srli a0, a0, 1
1192 ; CHECK-NEXT: addi a0, a0, -1
1193 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
1194 ; CHECK-NEXT: vid.v v9
1195 ; CHECK-NEXT: vrsub.vx v10, v9, a0
1196 ; CHECK-NEXT: vrgather.vv v9, v8, v10
1197 ; CHECK-NEXT: vmv.v.v v8, v9
1199 %res = call <vscale x 4 x i16> @llvm.experimental.vector.reverse.nxv4i16(<vscale x 4 x i16> %a)
1200 ret <vscale x 4 x i16> %res
1203 define <vscale x 8 x i16> @reverse_nxv8i16(<vscale x 8 x i16> %a) {
1204 ; CHECK-LABEL: reverse_nxv8i16:
1206 ; CHECK-NEXT: csrr a0, vlenb
1207 ; CHECK-NEXT: addi a0, a0, -1
1208 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1209 ; CHECK-NEXT: vid.v v10
1210 ; CHECK-NEXT: vrsub.vx v12, v10, a0
1211 ; CHECK-NEXT: vrgather.vv v10, v8, v12
1212 ; CHECK-NEXT: vmv.v.v v8, v10
1214 %res = call <vscale x 8 x i16> @llvm.experimental.vector.reverse.nxv8i16(<vscale x 8 x i16> %a)
1215 ret <vscale x 8 x i16> %res
1218 define <vscale x 16 x i16> @reverse_nxv16i16(<vscale x 16 x i16> %a) {
1219 ; CHECK-LABEL: reverse_nxv16i16:
1221 ; CHECK-NEXT: csrr a0, vlenb
1222 ; CHECK-NEXT: slli a0, a0, 1
1223 ; CHECK-NEXT: addi a0, a0, -1
1224 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
1225 ; CHECK-NEXT: vid.v v12
1226 ; CHECK-NEXT: vrsub.vx v16, v12, a0
1227 ; CHECK-NEXT: vrgather.vv v12, v8, v16
1228 ; CHECK-NEXT: vmv.v.v v8, v12
1230 %res = call <vscale x 16 x i16> @llvm.experimental.vector.reverse.nxv16i16(<vscale x 16 x i16> %a)
1231 ret <vscale x 16 x i16> %res
1234 define <vscale x 32 x i16> @reverse_nxv32i16(<vscale x 32 x i16> %a) {
1235 ; CHECK-LABEL: reverse_nxv32i16:
1237 ; CHECK-NEXT: csrr a0, vlenb
1238 ; CHECK-NEXT: slli a0, a0, 2
1239 ; CHECK-NEXT: addi a0, a0, -1
1240 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
1241 ; CHECK-NEXT: vid.v v16
1242 ; CHECK-NEXT: vrsub.vx v24, v16, a0
1243 ; CHECK-NEXT: vrgather.vv v16, v8, v24
1244 ; CHECK-NEXT: vmv.v.v v8, v16
1246 %res = call <vscale x 32 x i16> @llvm.experimental.vector.reverse.nxv32i16(<vscale x 32 x i16> %a)
1247 ret <vscale x 32 x i16> %res
1250 define <vscale x 1 x i32> @reverse_nxv1i32(<vscale x 1 x i32> %a) {
1251 ; CHECK-LABEL: reverse_nxv1i32:
1253 ; CHECK-NEXT: csrr a0, vlenb
1254 ; CHECK-NEXT: srli a0, a0, 3
1255 ; CHECK-NEXT: addi a0, a0, -1
1256 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
1257 ; CHECK-NEXT: vid.v v9
1258 ; CHECK-NEXT: vrsub.vx v10, v9, a0
1259 ; CHECK-NEXT: vrgather.vv v9, v8, v10
1260 ; CHECK-NEXT: vmv1r.v v8, v9
1262 %res = call <vscale x 1 x i32> @llvm.experimental.vector.reverse.nxv1i32(<vscale x 1 x i32> %a)
1263 ret <vscale x 1 x i32> %res
1266 define <vscale x 2 x i32> @reverse_nxv2i32(<vscale x 2 x i32> %a) {
1267 ; CHECK-LABEL: reverse_nxv2i32:
1269 ; CHECK-NEXT: csrr a0, vlenb
1270 ; CHECK-NEXT: srli a0, a0, 2
1271 ; CHECK-NEXT: addi a0, a0, -1
1272 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
1273 ; CHECK-NEXT: vid.v v9
1274 ; CHECK-NEXT: vrsub.vx v10, v9, a0
1275 ; CHECK-NEXT: vrgather.vv v9, v8, v10
1276 ; CHECK-NEXT: vmv.v.v v8, v9
1278 %res = call <vscale x 2 x i32> @llvm.experimental.vector.reverse.nxv2i32(<vscale x 2 x i32> %a)
1279 ret <vscale x 2 x i32> %res
1282 define <vscale x 4 x i32> @reverse_nxv4i32(<vscale x 4 x i32> %a) {
1283 ; CHECK-LABEL: reverse_nxv4i32:
1285 ; CHECK-NEXT: csrr a0, vlenb
1286 ; CHECK-NEXT: srli a0, a0, 1
1287 ; CHECK-NEXT: addi a0, a0, -1
1288 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
1289 ; CHECK-NEXT: vid.v v10
1290 ; CHECK-NEXT: vrsub.vx v12, v10, a0
1291 ; CHECK-NEXT: vrgather.vv v10, v8, v12
1292 ; CHECK-NEXT: vmv.v.v v8, v10
1294 %res = call <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
1295 ret <vscale x 4 x i32> %res
1298 define <vscale x 8 x i32> @reverse_nxv8i32(<vscale x 8 x i32> %a) {
1299 ; CHECK-LABEL: reverse_nxv8i32:
1301 ; CHECK-NEXT: csrr a0, vlenb
1302 ; CHECK-NEXT: addi a0, a0, -1
1303 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1304 ; CHECK-NEXT: vid.v v12
1305 ; CHECK-NEXT: vrsub.vx v16, v12, a0
1306 ; CHECK-NEXT: vrgather.vv v12, v8, v16
1307 ; CHECK-NEXT: vmv.v.v v8, v12
1309 %res = call <vscale x 8 x i32> @llvm.experimental.vector.reverse.nxv8i32(<vscale x 8 x i32> %a)
1310 ret <vscale x 8 x i32> %res
1313 define <vscale x 16 x i32> @reverse_nxv16i32(<vscale x 16 x i32> %a) {
1314 ; CHECK-LABEL: reverse_nxv16i32:
1316 ; CHECK-NEXT: csrr a0, vlenb
1317 ; CHECK-NEXT: slli a0, a0, 1
1318 ; CHECK-NEXT: addi a0, a0, -1
1319 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
1320 ; CHECK-NEXT: vid.v v16
1321 ; CHECK-NEXT: vrsub.vx v24, v16, a0
1322 ; CHECK-NEXT: vrgather.vv v16, v8, v24
1323 ; CHECK-NEXT: vmv.v.v v8, v16
1325 %res = call <vscale x 16 x i32> @llvm.experimental.vector.reverse.nxv16i32(<vscale x 16 x i32> %a)
1326 ret <vscale x 16 x i32> %res
1329 define <vscale x 1 x i64> @reverse_nxv1i64(<vscale x 1 x i64> %a) {
1330 ; CHECK-LABEL: reverse_nxv1i64:
1332 ; CHECK-NEXT: csrr a0, vlenb
1333 ; CHECK-NEXT: srli a0, a0, 3
1334 ; CHECK-NEXT: addi a0, a0, -1
1335 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
1336 ; CHECK-NEXT: vid.v v9
1337 ; CHECK-NEXT: vrsub.vx v10, v9, a0
1338 ; CHECK-NEXT: vrgather.vv v9, v8, v10
1339 ; CHECK-NEXT: vmv.v.v v8, v9
1341 %res = call <vscale x 1 x i64> @llvm.experimental.vector.reverse.nxv1i64(<vscale x 1 x i64> %a)
1342 ret <vscale x 1 x i64> %res
1345 define <vscale x 2 x i64> @reverse_nxv2i64(<vscale x 2 x i64> %a) {
1346 ; CHECK-LABEL: reverse_nxv2i64:
1348 ; CHECK-NEXT: csrr a0, vlenb
1349 ; CHECK-NEXT: srli a0, a0, 2
1350 ; CHECK-NEXT: addi a0, a0, -1
1351 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1352 ; CHECK-NEXT: vid.v v10
1353 ; CHECK-NEXT: vrsub.vx v12, v10, a0
1354 ; CHECK-NEXT: vrgather.vv v10, v8, v12
1355 ; CHECK-NEXT: vmv.v.v v8, v10
1357 %res = call <vscale x 2 x i64> @llvm.experimental.vector.reverse.nxv2i64(<vscale x 2 x i64> %a)
1358 ret <vscale x 2 x i64> %res
1361 define <vscale x 4 x i64> @reverse_nxv4i64(<vscale x 4 x i64> %a) {
1362 ; CHECK-LABEL: reverse_nxv4i64:
1364 ; CHECK-NEXT: csrr a0, vlenb
1365 ; CHECK-NEXT: srli a0, a0, 1
1366 ; CHECK-NEXT: addi a0, a0, -1
1367 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1368 ; CHECK-NEXT: vid.v v12
1369 ; CHECK-NEXT: vrsub.vx v16, v12, a0
1370 ; CHECK-NEXT: vrgather.vv v12, v8, v16
1371 ; CHECK-NEXT: vmv.v.v v8, v12
1373 %res = call <vscale x 4 x i64> @llvm.experimental.vector.reverse.nxv4i64(<vscale x 4 x i64> %a)
1374 ret <vscale x 4 x i64> %res
1377 define <vscale x 8 x i64> @reverse_nxv8i64(<vscale x 8 x i64> %a) {
1378 ; CHECK-LABEL: reverse_nxv8i64:
1380 ; CHECK-NEXT: csrr a0, vlenb
1381 ; CHECK-NEXT: addi a0, a0, -1
1382 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1383 ; CHECK-NEXT: vid.v v16
1384 ; CHECK-NEXT: vrsub.vx v24, v16, a0
1385 ; CHECK-NEXT: vrgather.vv v16, v8, v24
1386 ; CHECK-NEXT: vmv.v.v v8, v16
1387 ; CHECK-NEXT: ret
1388 %res = call <vscale x 8 x i64> @llvm.experimental.vector.reverse.nxv8i64(<vscale x 8 x i64> %a)
1389 ret <vscale x 8 x i64> %res
1390 }
1393 ; VECTOR_REVERSE - floating point
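; Note: as with the integer element types above, each floating-point reverse below is
; expected to lower to a vid.v/vrsub.vx index computation feeding a vrgather.vv, with
; the gathered result then copied back into v8 by a vector or whole-register move.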
1396 define <vscale x 1 x half> @reverse_nxv1f16(<vscale x 1 x half> %a) {
1397 ; CHECK-LABEL: reverse_nxv1f16:
1398 ; CHECK: # %bb.0:
1399 ; CHECK-NEXT: csrr a0, vlenb
1400 ; CHECK-NEXT: srli a0, a0, 3
1401 ; CHECK-NEXT: addi a0, a0, -1
1402 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
1403 ; CHECK-NEXT: vid.v v9
1404 ; CHECK-NEXT: vrsub.vx v10, v9, a0
1405 ; CHECK-NEXT: vrgather.vv v9, v8, v10
1406 ; CHECK-NEXT: vmv1r.v v8, v9
1407 ; CHECK-NEXT: ret
1408 %res = call <vscale x 1 x half> @llvm.experimental.vector.reverse.nxv1f16(<vscale x 1 x half> %a)
1409 ret <vscale x 1 x half> %res
1410 }
1412 define <vscale x 2 x half> @reverse_nxv2f16(<vscale x 2 x half> %a) {
1413 ; CHECK-LABEL: reverse_nxv2f16:
1414 ; CHECK: # %bb.0:
1415 ; CHECK-NEXT: csrr a0, vlenb
1416 ; CHECK-NEXT: srli a0, a0, 2
1417 ; CHECK-NEXT: addi a0, a0, -1
1418 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
1419 ; CHECK-NEXT: vid.v v9
1420 ; CHECK-NEXT: vrsub.vx v10, v9, a0
1421 ; CHECK-NEXT: vrgather.vv v9, v8, v10
1422 ; CHECK-NEXT: vmv1r.v v8, v9
1423 ; CHECK-NEXT: ret
1424 %res = call <vscale x 2 x half> @llvm.experimental.vector.reverse.nxv2f16(<vscale x 2 x half> %a)
1425 ret <vscale x 2 x half> %res
1426 }
1428 define <vscale x 4 x half> @reverse_nxv4f16(<vscale x 4 x half> %a) {
1429 ; CHECK-LABEL: reverse_nxv4f16:
1430 ; CHECK: # %bb.0:
1431 ; CHECK-NEXT: csrr a0, vlenb
1432 ; CHECK-NEXT: srli a0, a0, 1
1433 ; CHECK-NEXT: addi a0, a0, -1
1434 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
1435 ; CHECK-NEXT: vid.v v9
1436 ; CHECK-NEXT: vrsub.vx v10, v9, a0
1437 ; CHECK-NEXT: vrgather.vv v9, v8, v10
1438 ; CHECK-NEXT: vmv.v.v v8, v9
1439 ; CHECK-NEXT: ret
1440 %res = call <vscale x 4 x half> @llvm.experimental.vector.reverse.nxv4f16(<vscale x 4 x half> %a)
1441 ret <vscale x 4 x half> %res
1442 }
1444 define <vscale x 8 x half> @reverse_nxv8f16(<vscale x 8 x half> %a) {
1445 ; CHECK-LABEL: reverse_nxv8f16:
1446 ; CHECK: # %bb.0:
1447 ; CHECK-NEXT: csrr a0, vlenb
1448 ; CHECK-NEXT: addi a0, a0, -1
1449 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1450 ; CHECK-NEXT: vid.v v10
1451 ; CHECK-NEXT: vrsub.vx v12, v10, a0
1452 ; CHECK-NEXT: vrgather.vv v10, v8, v12
1453 ; CHECK-NEXT: vmv.v.v v8, v10
1454 ; CHECK-NEXT: ret
1455 %res = call <vscale x 8 x half> @llvm.experimental.vector.reverse.nxv8f16(<vscale x 8 x half> %a)
1456 ret <vscale x 8 x half> %res
1457 }
1459 define <vscale x 16 x half> @reverse_nxv16f16(<vscale x 16 x half> %a) {
1460 ; CHECK-LABEL: reverse_nxv16f16:
1461 ; CHECK: # %bb.0:
1462 ; CHECK-NEXT: csrr a0, vlenb
1463 ; CHECK-NEXT: slli a0, a0, 1
1464 ; CHECK-NEXT: addi a0, a0, -1
1465 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
1466 ; CHECK-NEXT: vid.v v12
1467 ; CHECK-NEXT: vrsub.vx v16, v12, a0
1468 ; CHECK-NEXT: vrgather.vv v12, v8, v16
1469 ; CHECK-NEXT: vmv.v.v v8, v12
1470 ; CHECK-NEXT: ret
1471 %res = call <vscale x 16 x half> @llvm.experimental.vector.reverse.nxv16f16(<vscale x 16 x half> %a)
1472 ret <vscale x 16 x half> %res
1473 }
1475 define <vscale x 32 x half> @reverse_nxv32f16(<vscale x 32 x half> %a) {
1476 ; CHECK-LABEL: reverse_nxv32f16:
1477 ; CHECK: # %bb.0:
1478 ; CHECK-NEXT: csrr a0, vlenb
1479 ; CHECK-NEXT: slli a0, a0, 2
1480 ; CHECK-NEXT: addi a0, a0, -1
1481 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
1482 ; CHECK-NEXT: vid.v v16
1483 ; CHECK-NEXT: vrsub.vx v24, v16, a0
1484 ; CHECK-NEXT: vrgather.vv v16, v8, v24
1485 ; CHECK-NEXT: vmv.v.v v8, v16
1486 ; CHECK-NEXT: ret
1487 %res = call <vscale x 32 x half> @llvm.experimental.vector.reverse.nxv32f16(<vscale x 32 x half> %a)
1488 ret <vscale x 32 x half> %res
1489 }
1491 define <vscale x 1 x float> @reverse_nxv1f32(<vscale x 1 x float> %a) {
1492 ; CHECK-LABEL: reverse_nxv1f32:
1493 ; CHECK: # %bb.0:
1494 ; CHECK-NEXT: csrr a0, vlenb
1495 ; CHECK-NEXT: srli a0, a0, 3
1496 ; CHECK-NEXT: addi a0, a0, -1
1497 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
1498 ; CHECK-NEXT: vid.v v9
1499 ; CHECK-NEXT: vrsub.vx v10, v9, a0
1500 ; CHECK-NEXT: vrgather.vv v9, v8, v10
1501 ; CHECK-NEXT: vmv1r.v v8, v9
1502 ; CHECK-NEXT: ret
1503 %res = call <vscale x 1 x float> @llvm.experimental.vector.reverse.nxv1f32(<vscale x 1 x float> %a)
1504 ret <vscale x 1 x float> %res
1505 }
1507 define <vscale x 2 x float> @reverse_nxv2f32(<vscale x 2 x float> %a) {
1508 ; CHECK-LABEL: reverse_nxv2f32:
1509 ; CHECK: # %bb.0:
1510 ; CHECK-NEXT: csrr a0, vlenb
1511 ; CHECK-NEXT: srli a0, a0, 2
1512 ; CHECK-NEXT: addi a0, a0, -1
1513 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
1514 ; CHECK-NEXT: vid.v v9
1515 ; CHECK-NEXT: vrsub.vx v10, v9, a0
1516 ; CHECK-NEXT: vrgather.vv v9, v8, v10
1517 ; CHECK-NEXT: vmv.v.v v8, v9
1518 ; CHECK-NEXT: ret
1519 %res = call <vscale x 2 x float> @llvm.experimental.vector.reverse.nxv2f32(<vscale x 2 x float> %a)
1520 ret <vscale x 2 x float> %res
1521 }
1523 define <vscale x 4 x float> @reverse_nxv4f32(<vscale x 4 x float> %a) {
1524 ; CHECK-LABEL: reverse_nxv4f32:
1525 ; CHECK: # %bb.0:
1526 ; CHECK-NEXT: csrr a0, vlenb
1527 ; CHECK-NEXT: srli a0, a0, 1
1528 ; CHECK-NEXT: addi a0, a0, -1
1529 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
1530 ; CHECK-NEXT: vid.v v10
1531 ; CHECK-NEXT: vrsub.vx v12, v10, a0
1532 ; CHECK-NEXT: vrgather.vv v10, v8, v12
1533 ; CHECK-NEXT: vmv.v.v v8, v10
1534 ; CHECK-NEXT: ret
1535 %res = call <vscale x 4 x float> @llvm.experimental.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
1536 ret <vscale x 4 x float> %res
1537 }
1539 define <vscale x 8 x float> @reverse_nxv8f32(<vscale x 8 x float> %a) {
1540 ; CHECK-LABEL: reverse_nxv8f32:
1541 ; CHECK: # %bb.0:
1542 ; CHECK-NEXT: csrr a0, vlenb
1543 ; CHECK-NEXT: addi a0, a0, -1
1544 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1545 ; CHECK-NEXT: vid.v v12
1546 ; CHECK-NEXT: vrsub.vx v16, v12, a0
1547 ; CHECK-NEXT: vrgather.vv v12, v8, v16
1548 ; CHECK-NEXT: vmv.v.v v8, v12
1549 ; CHECK-NEXT: ret
1550 %res = call <vscale x 8 x float> @llvm.experimental.vector.reverse.nxv8f32(<vscale x 8 x float> %a)
1551 ret <vscale x 8 x float> %res
1552 }
1554 define <vscale x 16 x float> @reverse_nxv16f32(<vscale x 16 x float> %a) {
1555 ; CHECK-LABEL: reverse_nxv16f32:
1556 ; CHECK: # %bb.0:
1557 ; CHECK-NEXT: csrr a0, vlenb
1558 ; CHECK-NEXT: slli a0, a0, 1
1559 ; CHECK-NEXT: addi a0, a0, -1
1560 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
1561 ; CHECK-NEXT: vid.v v16
1562 ; CHECK-NEXT: vrsub.vx v24, v16, a0
1563 ; CHECK-NEXT: vrgather.vv v16, v8, v24
1564 ; CHECK-NEXT: vmv.v.v v8, v16
1565 ; CHECK-NEXT: ret
1566 %res = call <vscale x 16 x float> @llvm.experimental.vector.reverse.nxv16f32(<vscale x 16 x float> %a)
1567 ret <vscale x 16 x float> %res
1568 }
1570 define <vscale x 1 x double> @reverse_nxv1f64(<vscale x 1 x double> %a) {
1571 ; CHECK-LABEL: reverse_nxv1f64:
1572 ; CHECK: # %bb.0:
1573 ; CHECK-NEXT: csrr a0, vlenb
1574 ; CHECK-NEXT: srli a0, a0, 3
1575 ; CHECK-NEXT: addi a0, a0, -1
1576 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
1577 ; CHECK-NEXT: vid.v v9
1578 ; CHECK-NEXT: vrsub.vx v10, v9, a0
1579 ; CHECK-NEXT: vrgather.vv v9, v8, v10
1580 ; CHECK-NEXT: vmv.v.v v8, v9
1581 ; CHECK-NEXT: ret
1582 %res = call <vscale x 1 x double> @llvm.experimental.vector.reverse.nxv1f64(<vscale x 1 x double> %a)
1583 ret <vscale x 1 x double> %res
1584 }
1586 define <vscale x 2 x double> @reverse_nxv2f64(<vscale x 2 x double> %a) {
1587 ; CHECK-LABEL: reverse_nxv2f64:
1588 ; CHECK: # %bb.0:
1589 ; CHECK-NEXT: csrr a0, vlenb
1590 ; CHECK-NEXT: srli a0, a0, 2
1591 ; CHECK-NEXT: addi a0, a0, -1
1592 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1593 ; CHECK-NEXT: vid.v v10
1594 ; CHECK-NEXT: vrsub.vx v12, v10, a0
1595 ; CHECK-NEXT: vrgather.vv v10, v8, v12
1596 ; CHECK-NEXT: vmv.v.v v8, v10
1597 ; CHECK-NEXT: ret
1598 %res = call <vscale x 2 x double> @llvm.experimental.vector.reverse.nxv2f64(<vscale x 2 x double> %a)
1599 ret <vscale x 2 x double> %res
1600 }
1602 define <vscale x 4 x double> @reverse_nxv4f64(<vscale x 4 x double> %a) {
1603 ; CHECK-LABEL: reverse_nxv4f64:
1604 ; CHECK: # %bb.0:
1605 ; CHECK-NEXT: csrr a0, vlenb
1606 ; CHECK-NEXT: srli a0, a0, 1
1607 ; CHECK-NEXT: addi a0, a0, -1
1608 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1609 ; CHECK-NEXT: vid.v v12
1610 ; CHECK-NEXT: vrsub.vx v16, v12, a0
1611 ; CHECK-NEXT: vrgather.vv v12, v8, v16
1612 ; CHECK-NEXT: vmv.v.v v8, v12
1613 ; CHECK-NEXT: ret
1614 %res = call <vscale x 4 x double> @llvm.experimental.vector.reverse.nxv4f64(<vscale x 4 x double> %a)
1615 ret <vscale x 4 x double> %res
1616 }
1618 define <vscale x 8 x double> @reverse_nxv8f64(<vscale x 8 x double> %a) {
1619 ; CHECK-LABEL: reverse_nxv8f64:
1620 ; CHECK: # %bb.0:
1621 ; CHECK-NEXT: csrr a0, vlenb
1622 ; CHECK-NEXT: addi a0, a0, -1
1623 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1624 ; CHECK-NEXT: vid.v v16
1625 ; CHECK-NEXT: vrsub.vx v24, v16, a0
1626 ; CHECK-NEXT: vrgather.vv v16, v8, v24
1627 ; CHECK-NEXT: vmv.v.v v8, v16
1628 ; CHECK-NEXT: ret
1629 %res = call <vscale x 8 x double> @llvm.experimental.vector.reverse.nxv8f64(<vscale x 8 x double> %a)
1630 ret <vscale x 8 x double> %res
1631 }
1633 ; Test reversing vector types that are widened to a power-of-two container
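; Note: the nxv3i64, nxv6i64 and nxv12i64 cases below have non-power-of-two element
; counts, so the checks suggest the reverse is performed in the wider containing
; register group (vrgather.vv) and the live portion is then extracted with
; whole-register moves (vmv1r.v/vmv2r.v); the nxv12i64 case additionally round-trips
; through an aligned stack slot.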
1635 define <vscale x 3 x i64> @reverse_nxv3i64(<vscale x 3 x i64> %a) {
1636 ; CHECK-LABEL: reverse_nxv3i64:
1637 ; CHECK: # %bb.0:
1638 ; CHECK-NEXT: csrr a0, vlenb
1639 ; CHECK-NEXT: srli a0, a0, 1
1640 ; CHECK-NEXT: addi a0, a0, -1
1641 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1642 ; CHECK-NEXT: vid.v v12
1643 ; CHECK-NEXT: vrsub.vx v12, v12, a0
1644 ; CHECK-NEXT: vrgather.vv v16, v8, v12
1645 ; CHECK-NEXT: vmv1r.v v8, v17
1646 ; CHECK-NEXT: vmv1r.v v9, v18
1647 ; CHECK-NEXT: vmv1r.v v10, v19
1648 ; CHECK-NEXT: ret
1649 %res = call <vscale x 3 x i64> @llvm.experimental.vector.reverse.nxv3i64(<vscale x 3 x i64> %a)
1650 ret <vscale x 3 x i64> %res
1651 }
1653 define <vscale x 6 x i64> @reverse_nxv6i64(<vscale x 6 x i64> %a) {
1654 ; CHECK-LABEL: reverse_nxv6i64:
1655 ; CHECK: # %bb.0:
1656 ; CHECK-NEXT: csrr a0, vlenb
1657 ; CHECK-NEXT: addi a0, a0, -1
1658 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1659 ; CHECK-NEXT: vid.v v16
1660 ; CHECK-NEXT: vrsub.vx v16, v16, a0
1661 ; CHECK-NEXT: vrgather.vv v24, v8, v16
1662 ; CHECK-NEXT: vmv2r.v v8, v26
1663 ; CHECK-NEXT: vmv2r.v v10, v28
1664 ; CHECK-NEXT: vmv2r.v v12, v30
1665 ; CHECK-NEXT: ret
1666 %res = call <vscale x 6 x i64> @llvm.experimental.vector.reverse.nxv6i64(<vscale x 6 x i64> %a)
1667 ret <vscale x 6 x i64> %res
1668 }
1670 define <vscale x 12 x i64> @reverse_nxv12i64(<vscale x 12 x i64> %a) {
1671 ; RV32-LABEL: reverse_nxv12i64:
1672 ; RV32: # %bb.0:
1673 ; RV32-NEXT: addi sp, sp, -80
1674 ; RV32-NEXT: .cfi_def_cfa_offset 80
1675 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
1676 ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
1677 ; RV32-NEXT: .cfi_offset ra, -4
1678 ; RV32-NEXT: .cfi_offset s0, -8
1679 ; RV32-NEXT: addi s0, sp, 80
1680 ; RV32-NEXT: .cfi_def_cfa s0, 0
1681 ; RV32-NEXT: csrr a0, vlenb
1682 ; RV32-NEXT: slli a0, a0, 4
1683 ; RV32-NEXT: sub sp, sp, a0
1684 ; RV32-NEXT: andi sp, sp, -64
1685 ; RV32-NEXT: csrr a0, vlenb
1686 ; RV32-NEXT: addi a1, a0, -1
1687 ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
1688 ; RV32-NEXT: vid.v v24
1689 ; RV32-NEXT: vrsub.vx v24, v24, a1
1690 ; RV32-NEXT: vrgather.vv v0, v16, v24
1691 ; RV32-NEXT: vmv4r.v v16, v4
1692 ; RV32-NEXT: vrgather.vv v0, v8, v24
1693 ; RV32-NEXT: vmv4r.v v20, v0
1694 ; RV32-NEXT: slli a0, a0, 3
1695 ; RV32-NEXT: addi a1, sp, 64
1696 ; RV32-NEXT: add a0, a1, a0
1697 ; RV32-NEXT: vs4r.v v4, (a0)
1698 ; RV32-NEXT: vs8r.v v16, (a1)
1699 ; RV32-NEXT: vl8re64.v v16, (a0)
1700 ; RV32-NEXT: vl8re64.v v8, (a1)
1701 ; RV32-NEXT: addi sp, s0, -80
1702 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
1703 ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
1704 ; RV32-NEXT: addi sp, sp, 80
1705 ; RV32-NEXT: ret
1707 ; RV64-LABEL: reverse_nxv12i64:
1708 ; RV64: # %bb.0:
1709 ; RV64-NEXT: addi sp, sp, -80
1710 ; RV64-NEXT: .cfi_def_cfa_offset 80
1711 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
1712 ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
1713 ; RV64-NEXT: .cfi_offset ra, -8
1714 ; RV64-NEXT: .cfi_offset s0, -16
1715 ; RV64-NEXT: addi s0, sp, 80
1716 ; RV64-NEXT: .cfi_def_cfa s0, 0
1717 ; RV64-NEXT: csrr a0, vlenb
1718 ; RV64-NEXT: slli a0, a0, 4
1719 ; RV64-NEXT: sub sp, sp, a0
1720 ; RV64-NEXT: andi sp, sp, -64
1721 ; RV64-NEXT: csrr a0, vlenb
1722 ; RV64-NEXT: addi a1, a0, -1
1723 ; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
1724 ; RV64-NEXT: vid.v v24
1725 ; RV64-NEXT: vrsub.vx v24, v24, a1
1726 ; RV64-NEXT: vrgather.vv v0, v16, v24
1727 ; RV64-NEXT: vmv4r.v v16, v4
1728 ; RV64-NEXT: vrgather.vv v0, v8, v24
1729 ; RV64-NEXT: vmv4r.v v20, v0
1730 ; RV64-NEXT: slli a0, a0, 3
1731 ; RV64-NEXT: addi a1, sp, 64
1732 ; RV64-NEXT: add a0, a1, a0
1733 ; RV64-NEXT: vs4r.v v4, (a0)
1734 ; RV64-NEXT: vs8r.v v16, (a1)
1735 ; RV64-NEXT: vl8re64.v v16, (a0)
1736 ; RV64-NEXT: vl8re64.v v8, (a1)
1737 ; RV64-NEXT: addi sp, s0, -80
1738 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
1739 ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
1740 ; RV64-NEXT: addi sp, sp, 80
1741 ; RV64-NEXT: ret
1742 %res = call <vscale x 12 x i64> @llvm.experimental.vector.reverse.nxv12i64(<vscale x 12 x i64> %a)
1743 ret <vscale x 12 x i64> %res
1744 }
1746 declare <vscale x 2 x i1> @llvm.experimental.vector.reverse.nxv2i1(<vscale x 2 x i1>)
1747 declare <vscale x 4 x i1> @llvm.experimental.vector.reverse.nxv4i1(<vscale x 4 x i1>)
1748 declare <vscale x 8 x i1> @llvm.experimental.vector.reverse.nxv8i1(<vscale x 8 x i1>)
1749 declare <vscale x 16 x i1> @llvm.experimental.vector.reverse.nxv16i1(<vscale x 16 x i1>)
1750 declare <vscale x 32 x i1> @llvm.experimental.vector.reverse.nxv32i1(<vscale x 32 x i1>)
1751 declare <vscale x 64 x i1> @llvm.experimental.vector.reverse.nxv64i1(<vscale x 64 x i1>)
1752 declare <vscale x 1 x i8> @llvm.experimental.vector.reverse.nxv1i8(<vscale x 1 x i8>)
1753 declare <vscale x 2 x i8> @llvm.experimental.vector.reverse.nxv2i8(<vscale x 2 x i8>)
1754 declare <vscale x 4 x i8> @llvm.experimental.vector.reverse.nxv4i8(<vscale x 4 x i8>)
1755 declare <vscale x 8 x i8> @llvm.experimental.vector.reverse.nxv8i8(<vscale x 8 x i8>)
1756 declare <vscale x 16 x i8> @llvm.experimental.vector.reverse.nxv16i8(<vscale x 16 x i8>)
1757 declare <vscale x 32 x i8> @llvm.experimental.vector.reverse.nxv32i8(<vscale x 32 x i8>)
1758 declare <vscale x 64 x i8> @llvm.experimental.vector.reverse.nxv64i8(<vscale x 64 x i8>)
1759 declare <vscale x 1 x i16> @llvm.experimental.vector.reverse.nxv1i16(<vscale x 1 x i16>)
1760 declare <vscale x 2 x i16> @llvm.experimental.vector.reverse.nxv2i16(<vscale x 2 x i16>)
1761 declare <vscale x 4 x i16> @llvm.experimental.vector.reverse.nxv4i16(<vscale x 4 x i16>)
1762 declare <vscale x 8 x i16> @llvm.experimental.vector.reverse.nxv8i16(<vscale x 8 x i16>)
1763 declare <vscale x 16 x i16> @llvm.experimental.vector.reverse.nxv16i16(<vscale x 16 x i16>)
1764 declare <vscale x 32 x i16> @llvm.experimental.vector.reverse.nxv32i16(<vscale x 32 x i16>)
1765 declare <vscale x 1 x i32> @llvm.experimental.vector.reverse.nxv1i32(<vscale x 1 x i32>)
1766 declare <vscale x 2 x i32> @llvm.experimental.vector.reverse.nxv2i32(<vscale x 2 x i32>)
1767 declare <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32>)
1768 declare <vscale x 8 x i32> @llvm.experimental.vector.reverse.nxv8i32(<vscale x 8 x i32>)
1769 declare <vscale x 16 x i32> @llvm.experimental.vector.reverse.nxv16i32(<vscale x 16 x i32>)
1770 declare <vscale x 1 x i64> @llvm.experimental.vector.reverse.nxv1i64(<vscale x 1 x i64>)
1771 declare <vscale x 2 x i64> @llvm.experimental.vector.reverse.nxv2i64(<vscale x 2 x i64>)
1772 declare <vscale x 4 x i64> @llvm.experimental.vector.reverse.nxv4i64(<vscale x 4 x i64>)
1773 declare <vscale x 8 x i64> @llvm.experimental.vector.reverse.nxv8i64(<vscale x 8 x i64>)
1774 declare <vscale x 1 x half> @llvm.experimental.vector.reverse.nxv1f16(<vscale x 1 x half>)
1775 declare <vscale x 2 x half> @llvm.experimental.vector.reverse.nxv2f16(<vscale x 2 x half>)
1776 declare <vscale x 4 x half> @llvm.experimental.vector.reverse.nxv4f16(<vscale x 4 x half>)
1777 declare <vscale x 8 x half> @llvm.experimental.vector.reverse.nxv8f16(<vscale x 8 x half>)
1778 declare <vscale x 16 x half> @llvm.experimental.vector.reverse.nxv16f16(<vscale x 16 x half>)
1779 declare <vscale x 32 x half> @llvm.experimental.vector.reverse.nxv32f16(<vscale x 32 x half>)
1780 declare <vscale x 1 x float> @llvm.experimental.vector.reverse.nxv1f32(<vscale x 1 x float>)
1781 declare <vscale x 2 x float> @llvm.experimental.vector.reverse.nxv2f32(<vscale x 2 x float>)
1782 declare <vscale x 4 x float> @llvm.experimental.vector.reverse.nxv4f32(<vscale x 4 x float>)
1783 declare <vscale x 8 x float> @llvm.experimental.vector.reverse.nxv8f32(<vscale x 8 x float>)
1784 declare <vscale x 16 x float> @llvm.experimental.vector.reverse.nxv16f32(<vscale x 16 x float>)
1785 declare <vscale x 1 x double> @llvm.experimental.vector.reverse.nxv1f64(<vscale x 1 x double>)
1786 declare <vscale x 2 x double> @llvm.experimental.vector.reverse.nxv2f64(<vscale x 2 x double>)
1787 declare <vscale x 4 x double> @llvm.experimental.vector.reverse.nxv4f64(<vscale x 4 x double>)
1788 declare <vscale x 8 x double> @llvm.experimental.vector.reverse.nxv8f64(<vscale x 8 x double>)
1789 declare <vscale x 3 x i64> @llvm.experimental.vector.reverse.nxv3i64(<vscale x 3 x i64>)
1790 declare <vscale x 6 x i64> @llvm.experimental.vector.reverse.nxv6i64(<vscale x 6 x i64>)
1791 declare <vscale x 12 x i64> @llvm.experimental.vector.reverse.nxv12i64(<vscale x 12 x i64>)