; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV32-BITS-UNKNOWN
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV32-BITS-256
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV32-BITS-512
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV64-BITS-UNKNOWN
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV64-BITS-256
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV64-BITS-512
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVBB,RV32-ZVBB
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVBB,RV64-ZVBB
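
; Summary of the lowerings exercised by the checks below: without Zvbb,
; reverses are built from a vid.v/vrsub.vi index vector feeding vrgather.vv
; (or vrgatherei16.vv when the indices are computed in a narrower element
; type), two-element cases use a vslidedown.vi/vslideup.vi pair, and mask
; vectors are first widened with vmerge.vim and narrowed back with vmsne.vi.
; With Zvbb, mask reverses become a single vbrev.v, and small element
; reverses use vrev8.v or vror.vi.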

;
; VECTOR_REVERSE - masks
;

define <2 x i1> @reverse_v2i1(<2 x i1> %a) {
; NO-ZVBB-LABEL: reverse_v2i1:
; NO-ZVBB:       # %bb.0:
; NO-ZVBB-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; NO-ZVBB-NEXT:    vmv.v.i v8, 0
; NO-ZVBB-NEXT:    vmerge.vim v8, v8, 1, v0
; NO-ZVBB-NEXT:    vslidedown.vi v9, v8, 1
; NO-ZVBB-NEXT:    vslideup.vi v9, v8, 1
; NO-ZVBB-NEXT:    vmsne.vi v0, v9, 0
; NO-ZVBB-NEXT:    ret
;
; ZVBB-LABEL: reverse_v2i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; ZVBB-NEXT:    vbrev.v v8, v0
; ZVBB-NEXT:    vsrl.vi v0, v8, 6
; ZVBB-NEXT:    ret
  %res = call <2 x i1> @llvm.experimental.vector.reverse.v2i1(<2 x i1> %a)
  ret <2 x i1> %res
}

define <4 x i1> @reverse_v4i1(<4 x i1> %a) {
; NO-ZVBB-LABEL: reverse_v4i1:
; NO-ZVBB:       # %bb.0:
; NO-ZVBB-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; NO-ZVBB-NEXT:    vmv.v.i v8, 0
; NO-ZVBB-NEXT:    vmerge.vim v8, v8, 1, v0
; NO-ZVBB-NEXT:    vid.v v9
; NO-ZVBB-NEXT:    vrsub.vi v9, v9, 3
; NO-ZVBB-NEXT:    vrgather.vv v10, v8, v9
; NO-ZVBB-NEXT:    vmsne.vi v0, v10, 0
; NO-ZVBB-NEXT:    ret
;
; ZVBB-LABEL: reverse_v4i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; ZVBB-NEXT:    vbrev.v v8, v0
; ZVBB-NEXT:    vsrl.vi v0, v8, 4
; ZVBB-NEXT:    ret
  %res = call <4 x i1> @llvm.experimental.vector.reverse.v4i1(<4 x i1> %a)
  ret <4 x i1> %res
}

define <8 x i1> @reverse_v8i1(<8 x i1> %a) {
; NO-ZVBB-LABEL: reverse_v8i1:
; NO-ZVBB:       # %bb.0:
; NO-ZVBB-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; NO-ZVBB-NEXT:    vmv.v.i v8, 0
; NO-ZVBB-NEXT:    vmerge.vim v8, v8, 1, v0
; NO-ZVBB-NEXT:    vid.v v9
; NO-ZVBB-NEXT:    vrsub.vi v9, v9, 7
; NO-ZVBB-NEXT:    vrgather.vv v10, v8, v9
; NO-ZVBB-NEXT:    vmsne.vi v0, v10, 0
; NO-ZVBB-NEXT:    ret
;
; ZVBB-LABEL: reverse_v8i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; ZVBB-NEXT:    vbrev.v v0, v0
; ZVBB-NEXT:    ret
  %res = call <8 x i1> @llvm.experimental.vector.reverse.v8i1(<8 x i1> %a)
  ret <8 x i1> %res
}

define <16 x i1> @reverse_v16i1(<16 x i1> %a) {
; NO-ZVBB-LABEL: reverse_v16i1:
; NO-ZVBB:       # %bb.0:
; NO-ZVBB-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; NO-ZVBB-NEXT:    vmv.v.i v8, 0
; NO-ZVBB-NEXT:    vmerge.vim v8, v8, 1, v0
; NO-ZVBB-NEXT:    vid.v v9
; NO-ZVBB-NEXT:    vrsub.vi v9, v9, 15
; NO-ZVBB-NEXT:    vrgather.vv v10, v8, v9
; NO-ZVBB-NEXT:    vmsne.vi v0, v10, 0
; NO-ZVBB-NEXT:    ret
;
; ZVBB-LABEL: reverse_v16i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; ZVBB-NEXT:    vbrev.v v0, v0
; ZVBB-NEXT:    ret
  %res = call <16 x i1> @llvm.experimental.vector.reverse.v16i1(<16 x i1> %a)
  ret <16 x i1> %res
}

define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
; NO-ZVBB-LABEL: reverse_v32i1:
; NO-ZVBB:       # %bb.0:
; NO-ZVBB-NEXT:    li a0, 32
; NO-ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; NO-ZVBB-NEXT:    lui a0, %hi(.LCPI4_0)
; NO-ZVBB-NEXT:    addi a0, a0, %lo(.LCPI4_0)
; NO-ZVBB-NEXT:    vle8.v v8, (a0)
; NO-ZVBB-NEXT:    vmv.v.i v10, 0
; NO-ZVBB-NEXT:    vmerge.vim v10, v10, 1, v0
; NO-ZVBB-NEXT:    vrgather.vv v12, v10, v8
; NO-ZVBB-NEXT:    vmsne.vi v0, v12, 0
; NO-ZVBB-NEXT:    ret
;
; ZVBB-LABEL: reverse_v32i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; ZVBB-NEXT:    vbrev.v v0, v0
; ZVBB-NEXT:    ret
  %res = call <32 x i1> @llvm.experimental.vector.reverse.v32i1(<32 x i1> %a)
  ret <32 x i1> %res
}

define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
; NO-ZVBB-LABEL: reverse_v64i1:
; NO-ZVBB:       # %bb.0:
; NO-ZVBB-NEXT:    li a0, 64
; NO-ZVBB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; NO-ZVBB-NEXT:    lui a0, %hi(.LCPI5_0)
; NO-ZVBB-NEXT:    addi a0, a0, %lo(.LCPI5_0)
; NO-ZVBB-NEXT:    vle8.v v8, (a0)
; NO-ZVBB-NEXT:    vmv.v.i v12, 0
; NO-ZVBB-NEXT:    vmerge.vim v12, v12, 1, v0
; NO-ZVBB-NEXT:    vrgather.vv v16, v12, v8
; NO-ZVBB-NEXT:    vmsne.vi v0, v16, 0
; NO-ZVBB-NEXT:    ret
;
; ZVBB-LABEL: reverse_v64i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; ZVBB-NEXT:    vbrev.v v0, v0
; ZVBB-NEXT:    ret
  %res = call <64 x i1> @llvm.experimental.vector.reverse.v64i1(<64 x i1> %a)
  ret <64 x i1> %res
}

define <128 x i1> @reverse_v128i1(<128 x i1> %a) {
; CHECK-LABEL: reverse_v128i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 128
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    lui a0, %hi(.LCPI6_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI6_0)
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
; CHECK-NEXT:    vrgather.vv v24, v16, v8
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    ret
  %res = call <128 x i1> @llvm.experimental.vector.reverse.v128i1(<128 x i1> %a)
  ret <128 x i1> %res
}

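;
; VECTOR_REVERSE - integer elements. A one-element reverse is a no-op, so the
; v1 cases below check for a bare ret.
;
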
define <1 x i8> @reverse_v1i8(<1 x i8> %a) {
; CHECK-LABEL: reverse_v1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %res = call <1 x i8> @llvm.experimental.vector.reverse.v1i8(<1 x i8> %a)
  ret <1 x i8> %res
}

define <2 x i8> @reverse_v2i8(<2 x i8> %a) {
; NO-ZVBB-LABEL: reverse_v2i8:
; NO-ZVBB:       # %bb.0:
; NO-ZVBB-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; NO-ZVBB-NEXT:    vslidedown.vi v9, v8, 1
; NO-ZVBB-NEXT:    vslideup.vi v9, v8, 1
; NO-ZVBB-NEXT:    vmv1r.v v8, v9
; NO-ZVBB-NEXT:    ret
;
; ZVBB-LABEL: reverse_v2i8:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; ZVBB-NEXT:    vrev8.v v8, v8
; ZVBB-NEXT:    ret
  %res = call <2 x i8> @llvm.experimental.vector.reverse.v2i8(<2 x i8> %a)
  ret <2 x i8> %res
}

define <4 x i8> @reverse_v4i8(<4 x i8> %a) {
; CHECK-LABEL: reverse_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vi v10, v9, 3
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %res = call <4 x i8> @llvm.experimental.vector.reverse.v4i8(<4 x i8> %a)
  ret <4 x i8> %res
}

define <8 x i8> @reverse_v8i8(<8 x i8> %a) {
; CHECK-LABEL: reverse_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vi v10, v9, 7
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %res = call <8 x i8> @llvm.experimental.vector.reverse.v8i8(<8 x i8> %a)
  ret <8 x i8> %res
}

define <16 x i8> @reverse_v16i8(<16 x i8> %a) {
; CHECK-LABEL: reverse_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vi v10, v9, 15
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <16 x i8> @llvm.experimental.vector.reverse.v16i8(<16 x i8> %a)
  ret <16 x i8> %res
}

define <32 x i8> @reverse_v32i8(<32 x i8> %a) {
; CHECK-LABEL: reverse_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI12_0)
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v12, (a0)
; CHECK-NEXT:    vrgather.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %res = call <32 x i8> @llvm.experimental.vector.reverse.v32i8(<32 x i8> %a)
  ret <32 x i8> %res
}

define <64 x i8> @reverse_v64i8(<64 x i8> %a) {
; CHECK-LABEL: reverse_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI13_0)
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v16, (a0)
; CHECK-NEXT:    vrgather.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %res = call <64 x i8> @llvm.experimental.vector.reverse.v64i8(<64 x i8> %a)
  ret <64 x i8> %res
}

define <1 x i16> @reverse_v1i16(<1 x i16> %a) {
; CHECK-LABEL: reverse_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %res = call <1 x i16> @llvm.experimental.vector.reverse.v1i16(<1 x i16> %a)
  ret <1 x i16> %res
}

define <2 x i16> @reverse_v2i16(<2 x i16> %a) {
; NO-ZVBB-LABEL: reverse_v2i16:
; NO-ZVBB:       # %bb.0:
; NO-ZVBB-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; NO-ZVBB-NEXT:    vslidedown.vi v9, v8, 1
; NO-ZVBB-NEXT:    vslideup.vi v9, v8, 1
; NO-ZVBB-NEXT:    vmv1r.v v8, v9
; NO-ZVBB-NEXT:    ret
;
; ZVBB-LABEL: reverse_v2i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; ZVBB-NEXT:    vror.vi v8, v8, 16
; ZVBB-NEXT:    ret
  %res = call <2 x i16> @llvm.experimental.vector.reverse.v2i16(<2 x i16> %a)
  ret <2 x i16> %res
}

define <4 x i16> @reverse_v4i16(<4 x i16> %a) {
; CHECK-LABEL: reverse_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vi v10, v9, 3
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %res = call <4 x i16> @llvm.experimental.vector.reverse.v4i16(<4 x i16> %a)
  ret <4 x i16> %res
}

define <8 x i16> @reverse_v8i16(<8 x i16> %a) {
; CHECK-LABEL: reverse_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vi v10, v9, 7
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <8 x i16> @llvm.experimental.vector.reverse.v8i16(<8 x i16> %a)
  ret <8 x i16> %res
}

define <16 x i16> @reverse_v16i16(<16 x i16> %a) {
; CHECK-LABEL: reverse_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vi v12, v10, 15
; CHECK-NEXT:    vrgather.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %res = call <16 x i16> @llvm.experimental.vector.reverse.v16i16(<16 x i16> %a)
  ret <16 x i16> %res
}

define <32 x i16> @reverse_v32i16(<32 x i16> %a) {
; CHECK-LABEL: reverse_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, %hi(.LCPI19_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI19_0)
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle8.v v12, (a0)
; CHECK-NEXT:    vsext.vf2 v16, v12
; CHECK-NEXT:    vrgather.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %res = call <32 x i16> @llvm.experimental.vector.reverse.v32i16(<32 x i16> %a)
  ret <32 x i16> %res
}

define <1 x i32> @reverse_v1i32(<1 x i32> %a) {
; CHECK-LABEL: reverse_v1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %res = call <1 x i32> @llvm.experimental.vector.reverse.v1i32(<1 x i32> %a)
  ret <1 x i32> %res
}

define <2 x i32> @reverse_v2i32(<2 x i32> %a) {
; NO-ZVBB-LABEL: reverse_v2i32:
; NO-ZVBB:       # %bb.0:
; NO-ZVBB-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; NO-ZVBB-NEXT:    vslidedown.vi v9, v8, 1
; NO-ZVBB-NEXT:    vslideup.vi v9, v8, 1
; NO-ZVBB-NEXT:    vmv1r.v v8, v9
; NO-ZVBB-NEXT:    ret
;
; ZVBB-LABEL: reverse_v2i32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; ZVBB-NEXT:    vror.vi v8, v8, 32
; ZVBB-NEXT:    ret
  %res = call <2 x i32> @llvm.experimental.vector.reverse.v2i32(<2 x i32> %a)
  ret <2 x i32> %res
}

define <4 x i32> @reverse_v4i32(<4 x i32> %a) {
; CHECK-LABEL: reverse_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vi v10, v9, 3
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <4 x i32> @llvm.experimental.vector.reverse.v4i32(<4 x i32> %a)
  ret <4 x i32> %res
}

define <8 x i32> @reverse_v8i32(<8 x i32> %a) {
; CHECK-LABEL: reverse_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vi v12, v10, 7
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %res = call <8 x i32> @llvm.experimental.vector.reverse.v8i32(<8 x i32> %a)
  ret <8 x i32> %res
}

define <16 x i32> @reverse_v16i32(<16 x i32> %a) {
; CHECK-LABEL: reverse_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vi v16, v12, 15
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %res = call <16 x i32> @llvm.experimental.vector.reverse.v16i32(<16 x i32> %a)
  ret <16 x i32> %res
}

define <1 x i64> @reverse_v1i64(<1 x i64> %a) {
; CHECK-LABEL: reverse_v1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %res = call <1 x i64> @llvm.experimental.vector.reverse.v1i64(<1 x i64> %a)
  ret <1 x i64> %res
}

define <2 x i64> @reverse_v2i64(<2 x i64> %a) {
; CHECK-LABEL: reverse_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v9, v8, 1
; CHECK-NEXT:    vslideup.vi v9, v8, 1
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <2 x i64> @llvm.experimental.vector.reverse.v2i64(<2 x i64> %a)
  ret <2 x i64> %res
}

define <4 x i64> @reverse_v4i64(<4 x i64> %a) {
; CHECK-LABEL: reverse_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vi v12, v10, 3
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %res = call <4 x i64> @llvm.experimental.vector.reverse.v4i64(<4 x i64> %a)
  ret <4 x i64> %res
}

define <8 x i64> @reverse_v8i64(<8 x i64> %a) {
; CHECK-LABEL: reverse_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vi v16, v12, 7
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %res = call <8 x i64> @llvm.experimental.vector.reverse.v8i64(<8 x i64> %a)
  ret <8 x i64> %res
}

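;
; VECTOR_REVERSE - floating-point elements. As the checks below show, the
; lowering is identical to the integer cases of the same element size: the
; gather indices do not depend on the element contents.
;
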
define <1 x half> @reverse_v1f16(<1 x half> %a) {
; CHECK-LABEL: reverse_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %res = call <1 x half> @llvm.experimental.vector.reverse.v1f16(<1 x half> %a)
  ret <1 x half> %res
}

define <2 x half> @reverse_v2f16(<2 x half> %a) {
; NO-ZVBB-LABEL: reverse_v2f16:
; NO-ZVBB:       # %bb.0:
; NO-ZVBB-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; NO-ZVBB-NEXT:    vslidedown.vi v9, v8, 1
; NO-ZVBB-NEXT:    vslideup.vi v9, v8, 1
; NO-ZVBB-NEXT:    vmv1r.v v8, v9
; NO-ZVBB-NEXT:    ret
;
; ZVBB-LABEL: reverse_v2f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; ZVBB-NEXT:    vror.vi v8, v8, 16
; ZVBB-NEXT:    ret
  %res = call <2 x half> @llvm.experimental.vector.reverse.v2f16(<2 x half> %a)
  ret <2 x half> %res
}

define <4 x half> @reverse_v4f16(<4 x half> %a) {
; CHECK-LABEL: reverse_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vi v10, v9, 3
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %res = call <4 x half> @llvm.experimental.vector.reverse.v4f16(<4 x half> %a)
  ret <4 x half> %res
}

define <8 x half> @reverse_v8f16(<8 x half> %a) {
; CHECK-LABEL: reverse_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vi v10, v9, 7
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <8 x half> @llvm.experimental.vector.reverse.v8f16(<8 x half> %a)
  ret <8 x half> %res
}

define <16 x half> @reverse_v16f16(<16 x half> %a) {
; CHECK-LABEL: reverse_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vi v12, v10, 15
; CHECK-NEXT:    vrgather.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %res = call <16 x half> @llvm.experimental.vector.reverse.v16f16(<16 x half> %a)
  ret <16 x half> %res
}

define <32 x half> @reverse_v32f16(<32 x half> %a) {
; CHECK-LABEL: reverse_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, %hi(.LCPI34_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI34_0)
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle8.v v12, (a0)
; CHECK-NEXT:    vsext.vf2 v16, v12
; CHECK-NEXT:    vrgather.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %res = call <32 x half> @llvm.experimental.vector.reverse.v32f16(<32 x half> %a)
  ret <32 x half> %res
}

define <1 x float> @reverse_v1f32(<1 x float> %a) {
; CHECK-LABEL: reverse_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %res = call <1 x float> @llvm.experimental.vector.reverse.v1f32(<1 x float> %a)
  ret <1 x float> %res
}

define <2 x float> @reverse_v2f32(<2 x float> %a) {
; NO-ZVBB-LABEL: reverse_v2f32:
; NO-ZVBB:       # %bb.0:
; NO-ZVBB-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; NO-ZVBB-NEXT:    vslidedown.vi v9, v8, 1
; NO-ZVBB-NEXT:    vslideup.vi v9, v8, 1
; NO-ZVBB-NEXT:    vmv1r.v v8, v9
; NO-ZVBB-NEXT:    ret
;
; ZVBB-LABEL: reverse_v2f32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; ZVBB-NEXT:    vror.vi v8, v8, 32
; ZVBB-NEXT:    ret
  %res = call <2 x float> @llvm.experimental.vector.reverse.v2f32(<2 x float> %a)
  ret <2 x float> %res
}

define <4 x float> @reverse_v4f32(<4 x float> %a) {
; CHECK-LABEL: reverse_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vi v10, v9, 3
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <4 x float> @llvm.experimental.vector.reverse.v4f32(<4 x float> %a)
  ret <4 x float> %res
}

define <8 x float> @reverse_v8f32(<8 x float> %a) {
; CHECK-LABEL: reverse_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vi v12, v10, 7
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %res = call <8 x float> @llvm.experimental.vector.reverse.v8f32(<8 x float> %a)
  ret <8 x float> %res
}

define <16 x float> @reverse_v16f32(<16 x float> %a) {
; CHECK-LABEL: reverse_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vi v16, v12, 15
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %res = call <16 x float> @llvm.experimental.vector.reverse.v16f32(<16 x float> %a)
  ret <16 x float> %res
}

define <1 x double> @reverse_v1f64(<1 x double> %a) {
; CHECK-LABEL: reverse_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %res = call <1 x double> @llvm.experimental.vector.reverse.v1f64(<1 x double> %a)
  ret <1 x double> %res
}

define <2 x double> @reverse_v2f64(<2 x double> %a) {
; CHECK-LABEL: reverse_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v9, v8, 1
; CHECK-NEXT:    vslideup.vi v9, v8, 1
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <2 x double> @llvm.experimental.vector.reverse.v2f64(<2 x double> %a)
  ret <2 x double> %res
}

define <4 x double> @reverse_v4f64(<4 x double> %a) {
; CHECK-LABEL: reverse_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vi v12, v10, 3
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %res = call <4 x double> @llvm.experimental.vector.reverse.v4f64(<4 x double> %a)
  ret <4 x double> %res
}

define <8 x double> @reverse_v8f64(<8 x double> %a) {
; CHECK-LABEL: reverse_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vi v16, v12, 7
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %res = call <8 x double> @llvm.experimental.vector.reverse.v8f64(<8 x double> %a)
  ret <8 x double> %res
}

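;
; VECTOR_REVERSE - non-power-of-two element counts. These are lowered through
; the next power-of-two vector type; the RV32 configurations load the gather
; indices from a constant pool, while RV64 builds them with vid.v/vrsub.vi.
;
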
define <3 x i64> @reverse_v3i64(<3 x i64> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_v3i64:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    lui a0, %hi(.LCPI44_0)
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, %lo(.LCPI44_0)
; RV32-BITS-UNKNOWN-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-BITS-UNKNOWN-NEXT:    vle16.v v12, (a0)
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v10, v8, v12
; RV32-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v10
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_v3i64:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    lui a0, %hi(.LCPI44_0)
; RV32-BITS-256-NEXT:    addi a0, a0, %lo(.LCPI44_0)
; RV32-BITS-256-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-BITS-256-NEXT:    vle16.v v12, (a0)
; RV32-BITS-256-NEXT:    vrgatherei16.vv v10, v8, v12
; RV32-BITS-256-NEXT:    vmv.v.v v8, v10
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_v3i64:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    lui a0, %hi(.LCPI44_0)
; RV32-BITS-512-NEXT:    addi a0, a0, %lo(.LCPI44_0)
; RV32-BITS-512-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-BITS-512-NEXT:    vle16.v v12, (a0)
; RV32-BITS-512-NEXT:    vrgatherei16.vv v10, v8, v12
; RV32-BITS-512-NEXT:    vmv.v.v v8, v10
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_v3i64:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV64-BITS-UNKNOWN-NEXT:    vid.v v10
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vi v12, v10, 2
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v10, v8, v12
; RV64-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v10
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_v3i64:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV64-BITS-256-NEXT:    vid.v v10
; RV64-BITS-256-NEXT:    vrsub.vi v12, v10, 2
; RV64-BITS-256-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV64-BITS-256-NEXT:    vrgatherei16.vv v10, v8, v12
; RV64-BITS-256-NEXT:    vmv.v.v v8, v10
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_v3i64:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV64-BITS-512-NEXT:    vid.v v10
; RV64-BITS-512-NEXT:    vrsub.vi v12, v10, 2
; RV64-BITS-512-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV64-BITS-512-NEXT:    vrgatherei16.vv v10, v8, v12
; RV64-BITS-512-NEXT:    vmv.v.v v8, v10
; RV64-BITS-512-NEXT:    ret
;
; RV32-ZVBB-LABEL: reverse_v3i64:
; RV32-ZVBB:       # %bb.0:
; RV32-ZVBB-NEXT:    lui a0, %hi(.LCPI44_0)
; RV32-ZVBB-NEXT:    addi a0, a0, %lo(.LCPI44_0)
; RV32-ZVBB-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-ZVBB-NEXT:    vle16.v v12, (a0)
; RV32-ZVBB-NEXT:    vrgatherei16.vv v10, v8, v12
; RV32-ZVBB-NEXT:    vmv.v.v v8, v10
; RV32-ZVBB-NEXT:    ret
;
; RV64-ZVBB-LABEL: reverse_v3i64:
; RV64-ZVBB:       # %bb.0:
; RV64-ZVBB-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV64-ZVBB-NEXT:    vid.v v10
; RV64-ZVBB-NEXT:    vrsub.vi v12, v10, 2
; RV64-ZVBB-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV64-ZVBB-NEXT:    vrgatherei16.vv v10, v8, v12
; RV64-ZVBB-NEXT:    vmv.v.v v8, v10
; RV64-ZVBB-NEXT:    ret
  %res = call <3 x i64> @llvm.experimental.vector.reverse.v3i64(<3 x i64> %a)
  ret <3 x i64> %res
}

define <6 x i64> @reverse_v6i64(<6 x i64> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_v6i64:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    lui a0, %hi(.LCPI45_0)
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, %lo(.LCPI45_0)
; RV32-BITS-UNKNOWN-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV32-BITS-UNKNOWN-NEXT:    vle16.v v16, (a0)
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v12, v8, v16
; RV32-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v12
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_v6i64:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    lui a0, %hi(.LCPI45_0)
; RV32-BITS-256-NEXT:    addi a0, a0, %lo(.LCPI45_0)
; RV32-BITS-256-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV32-BITS-256-NEXT:    vle16.v v16, (a0)
; RV32-BITS-256-NEXT:    vrgatherei16.vv v12, v8, v16
; RV32-BITS-256-NEXT:    vmv.v.v v8, v12
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_v6i64:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    lui a0, %hi(.LCPI45_0)
; RV32-BITS-512-NEXT:    addi a0, a0, %lo(.LCPI45_0)
; RV32-BITS-512-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV32-BITS-512-NEXT:    vle16.v v16, (a0)
; RV32-BITS-512-NEXT:    vrgatherei16.vv v12, v8, v16
; RV32-BITS-512-NEXT:    vmv.v.v v8, v12
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_v6i64:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; RV64-BITS-UNKNOWN-NEXT:    vid.v v12
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vi v16, v12, 5
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v12, v8, v16
; RV64-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v12
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_v6i64:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; RV64-BITS-256-NEXT:    vid.v v12
; RV64-BITS-256-NEXT:    vrsub.vi v16, v12, 5
; RV64-BITS-256-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; RV64-BITS-256-NEXT:    vrgatherei16.vv v12, v8, v16
; RV64-BITS-256-NEXT:    vmv.v.v v8, v12
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_v6i64:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; RV64-BITS-512-NEXT:    vid.v v12
; RV64-BITS-512-NEXT:    vrsub.vi v16, v12, 5
; RV64-BITS-512-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; RV64-BITS-512-NEXT:    vrgatherei16.vv v12, v8, v16
; RV64-BITS-512-NEXT:    vmv.v.v v8, v12
; RV64-BITS-512-NEXT:    ret
;
; RV32-ZVBB-LABEL: reverse_v6i64:
; RV32-ZVBB:       # %bb.0:
; RV32-ZVBB-NEXT:    lui a0, %hi(.LCPI45_0)
; RV32-ZVBB-NEXT:    addi a0, a0, %lo(.LCPI45_0)
; RV32-ZVBB-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV32-ZVBB-NEXT:    vle16.v v16, (a0)
; RV32-ZVBB-NEXT:    vrgatherei16.vv v12, v8, v16
; RV32-ZVBB-NEXT:    vmv.v.v v8, v12
; RV32-ZVBB-NEXT:    ret
;
; RV64-ZVBB-LABEL: reverse_v6i64:
; RV64-ZVBB:       # %bb.0:
; RV64-ZVBB-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; RV64-ZVBB-NEXT:    vid.v v12
; RV64-ZVBB-NEXT:    vrsub.vi v16, v12, 5
; RV64-ZVBB-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; RV64-ZVBB-NEXT:    vrgatherei16.vv v12, v8, v16
; RV64-ZVBB-NEXT:    vmv.v.v v8, v12
; RV64-ZVBB-NEXT:    ret
  %res = call <6 x i64> @llvm.experimental.vector.reverse.v6i64(<6 x i64> %a)
  ret <6 x i64> %res
}

define <12 x i64> @reverse_v12i64(<12 x i64> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_v12i64:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    lui a0, %hi(.LCPI46_0)
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, %lo(.LCPI46_0)
; RV32-BITS-UNKNOWN-NEXT:    li a1, 32
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; RV32-BITS-UNKNOWN-NEXT:    vle16.v v24, (a0)
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v16, v8, v24
; RV32-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v16
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_v12i64:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    lui a0, %hi(.LCPI46_0)
; RV32-BITS-256-NEXT:    addi a0, a0, %lo(.LCPI46_0)
; RV32-BITS-256-NEXT:    li a1, 32
; RV32-BITS-256-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; RV32-BITS-256-NEXT:    vle16.v v24, (a0)
; RV32-BITS-256-NEXT:    vrgatherei16.vv v16, v8, v24
; RV32-BITS-256-NEXT:    vmv.v.v v8, v16
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_v12i64:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    lui a0, %hi(.LCPI46_0)
; RV32-BITS-512-NEXT:    addi a0, a0, %lo(.LCPI46_0)
; RV32-BITS-512-NEXT:    li a1, 32
; RV32-BITS-512-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; RV32-BITS-512-NEXT:    vle16.v v24, (a0)
; RV32-BITS-512-NEXT:    vrgatherei16.vv v16, v8, v24
; RV32-BITS-512-NEXT:    vmv.v.v v8, v16
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_v12i64:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; RV64-BITS-UNKNOWN-NEXT:    vid.v v16
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vi v24, v16, 11
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v16, v8, v24
; RV64-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v16
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_v12i64:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; RV64-BITS-256-NEXT:    vid.v v16
; RV64-BITS-256-NEXT:    vrsub.vi v24, v16, 11
; RV64-BITS-256-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; RV64-BITS-256-NEXT:    vrgatherei16.vv v16, v8, v24
; RV64-BITS-256-NEXT:    vmv.v.v v8, v16
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_v12i64:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; RV64-BITS-512-NEXT:    vid.v v16
; RV64-BITS-512-NEXT:    vrsub.vi v24, v16, 11
; RV64-BITS-512-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; RV64-BITS-512-NEXT:    vrgatherei16.vv v16, v8, v24
; RV64-BITS-512-NEXT:    vmv.v.v v8, v16
; RV64-BITS-512-NEXT:    ret
;
; RV32-ZVBB-LABEL: reverse_v12i64:
; RV32-ZVBB:       # %bb.0:
; RV32-ZVBB-NEXT:    lui a0, %hi(.LCPI46_0)
; RV32-ZVBB-NEXT:    addi a0, a0, %lo(.LCPI46_0)
; RV32-ZVBB-NEXT:    li a1, 32
; RV32-ZVBB-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; RV32-ZVBB-NEXT:    vle16.v v24, (a0)
; RV32-ZVBB-NEXT:    vrgatherei16.vv v16, v8, v24
; RV32-ZVBB-NEXT:    vmv.v.v v8, v16
; RV32-ZVBB-NEXT:    ret
;
; RV64-ZVBB-LABEL: reverse_v12i64:
; RV64-ZVBB:       # %bb.0:
; RV64-ZVBB-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; RV64-ZVBB-NEXT:    vid.v v16
; RV64-ZVBB-NEXT:    vrsub.vi v24, v16, 11
; RV64-ZVBB-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; RV64-ZVBB-NEXT:    vrgatherei16.vv v16, v8, v24
; RV64-ZVBB-NEXT:    vmv.v.v v8, v16
; RV64-ZVBB-NEXT:    ret
  %res = call <12 x i64> @llvm.experimental.vector.reverse.v12i64(<12 x i64> %a)
  ret <12 x i64> %res
}

declare <2 x i1> @llvm.experimental.vector.reverse.v2i1(<2 x i1>)
declare <4 x i1> @llvm.experimental.vector.reverse.v4i1(<4 x i1>)
declare <8 x i1> @llvm.experimental.vector.reverse.v8i1(<8 x i1>)
declare <16 x i1> @llvm.experimental.vector.reverse.v16i1(<16 x i1>)
declare <32 x i1> @llvm.experimental.vector.reverse.v32i1(<32 x i1>)
declare <64 x i1> @llvm.experimental.vector.reverse.v64i1(<64 x i1>)
declare <128 x i1> @llvm.experimental.vector.reverse.v128i1(<128 x i1>)
declare <1 x i8> @llvm.experimental.vector.reverse.v1i8(<1 x i8>)
declare <2 x i8> @llvm.experimental.vector.reverse.v2i8(<2 x i8>)
declare <4 x i8> @llvm.experimental.vector.reverse.v4i8(<4 x i8>)
declare <8 x i8> @llvm.experimental.vector.reverse.v8i8(<8 x i8>)
declare <16 x i8> @llvm.experimental.vector.reverse.v16i8(<16 x i8>)
declare <32 x i8> @llvm.experimental.vector.reverse.v32i8(<32 x i8>)
declare <64 x i8> @llvm.experimental.vector.reverse.v64i8(<64 x i8>)
declare <1 x i16> @llvm.experimental.vector.reverse.v1i16(<1 x i16>)
declare <2 x i16> @llvm.experimental.vector.reverse.v2i16(<2 x i16>)
declare <4 x i16> @llvm.experimental.vector.reverse.v4i16(<4 x i16>)
declare <8 x i16> @llvm.experimental.vector.reverse.v8i16(<8 x i16>)
declare <16 x i16> @llvm.experimental.vector.reverse.v16i16(<16 x i16>)
declare <32 x i16> @llvm.experimental.vector.reverse.v32i16(<32 x i16>)
declare <1 x i32> @llvm.experimental.vector.reverse.v1i32(<1 x i32>)
declare <2 x i32> @llvm.experimental.vector.reverse.v2i32(<2 x i32>)
declare <4 x i32> @llvm.experimental.vector.reverse.v4i32(<4 x i32>)
declare <8 x i32> @llvm.experimental.vector.reverse.v8i32(<8 x i32>)
declare <16 x i32> @llvm.experimental.vector.reverse.v16i32(<16 x i32>)
declare <1 x i64> @llvm.experimental.vector.reverse.v1i64(<1 x i64>)
declare <2 x i64> @llvm.experimental.vector.reverse.v2i64(<2 x i64>)
declare <4 x i64> @llvm.experimental.vector.reverse.v4i64(<4 x i64>)
declare <8 x i64> @llvm.experimental.vector.reverse.v8i64(<8 x i64>)
declare <1 x half> @llvm.experimental.vector.reverse.v1f16(<1 x half>)
declare <2 x half> @llvm.experimental.vector.reverse.v2f16(<2 x half>)
declare <4 x half> @llvm.experimental.vector.reverse.v4f16(<4 x half>)
declare <8 x half> @llvm.experimental.vector.reverse.v8f16(<8 x half>)
declare <16 x half> @llvm.experimental.vector.reverse.v16f16(<16 x half>)
declare <32 x half> @llvm.experimental.vector.reverse.v32f16(<32 x half>)
declare <1 x float> @llvm.experimental.vector.reverse.v1f32(<1 x float>)
declare <2 x float> @llvm.experimental.vector.reverse.v2f32(<2 x float>)
declare <4 x float> @llvm.experimental.vector.reverse.v4f32(<4 x float>)
declare <8 x float> @llvm.experimental.vector.reverse.v8f32(<8 x float>)
declare <16 x float> @llvm.experimental.vector.reverse.v16f32(<16 x float>)
declare <1 x double> @llvm.experimental.vector.reverse.v1f64(<1 x double>)
declare <2 x double> @llvm.experimental.vector.reverse.v2f64(<2 x double>)
declare <4 x double> @llvm.experimental.vector.reverse.v4f64(<4 x double>)
declare <8 x double> @llvm.experimental.vector.reverse.v8f64(<8 x double>)
declare <3 x i64> @llvm.experimental.vector.reverse.v3i64(<3 x i64>)
declare <6 x i64> @llvm.experimental.vector.reverse.v6i64(<6 x i64>)
declare <12 x i64> @llvm.experimental.vector.reverse.v12i64(<12 x i64>)