1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs -riscv-v-vector-bits-min=128 \
3 ; RUN: < %s | FileCheck %s
; Masked vp.reverse of a <2 x i1> predicate: the i1 vector is widened to i8
; 0/1 values (vmv.v.i + vmerge.vim), reverse indices are built under %mask
; with vid.v/vrsub.vx (evl-1 - i), the bytes are permuted with
; vrgatherei16.vv, and the result is narrowed back to i1 via vmsne.vi.
; NOTE(review): the ret/closing brace of this function is not visible in
; this chunk — checks appear truncated relative to a full autogenerated run.
5 define <2 x i1> @test_vp_reverse_v2i1_masked(<2 x i1> %src, <2 x i1> %mask, i32 zeroext %evl) {
6 ; CHECK-LABEL: test_vp_reverse_v2i1_masked:
8 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
9 ; CHECK-NEXT: vmv.v.i v9, 0
10 ; CHECK-NEXT: addi a0, a0, -1
11 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
12 ; CHECK-NEXT: vmv1r.v v0, v8
13 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
14 ; CHECK-NEXT: vid.v v10, v0.t
15 ; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
16 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
17 ; CHECK-NEXT: vrgatherei16.vv v11, v9, v10, v0.t
18 ; CHECK-NEXT: vmsne.vi v0, v11, 0, v0.t
20 %dst = call <2 x i1> @llvm.experimental.vp.reverse.v2i1(<2 x i1> %src, <2 x i1> %mask, i32 %evl)
; Unmasked vp.reverse of a <2 x i1> predicate (all-ones mask via splat):
; same widen-to-i8 / vid+vrsub reversed-index / vrgatherei16 / vmsne lowering
; as the masked variant, but with no v0.t masking on the data ops.
25 define <2 x i1> @test_vp_reverse_v2i1(<2 x i1> %src, i32 zeroext %evl) {
26 ; CHECK-LABEL: test_vp_reverse_v2i1:
28 ; CHECK-NEXT: addi a1, a0, -1
29 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
30 ; CHECK-NEXT: vid.v v8
31 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
32 ; CHECK-NEXT: vmv.v.i v9, 0
33 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
34 ; CHECK-NEXT: vrsub.vx v8, v8, a1
35 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
36 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
37 ; CHECK-NEXT: vrgatherei16.vv v10, v9, v8
38 ; CHECK-NEXT: vmsne.vi v0, v10, 0
40 %dst = call <2 x i1> @llvm.experimental.vp.reverse.v2i1(<2 x i1> %src, <2 x i1> splat (i1 1), i32 %evl)
; Masked vp.reverse of a <4 x i1> predicate.  Same lowering strategy as the
; v2i1 masked case, at LMUL mf4 for the i8 data and mf2 for the e16 indices.
45 define <4 x i1> @test_vp_reverse_v4i1_masked(<4 x i1> %src, <4 x i1> %mask, i32 zeroext %evl) {
46 ; CHECK-LABEL: test_vp_reverse_v4i1_masked:
48 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
49 ; CHECK-NEXT: vmv.v.i v9, 0
50 ; CHECK-NEXT: addi a0, a0, -1
51 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
52 ; CHECK-NEXT: vmv1r.v v0, v8
53 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
54 ; CHECK-NEXT: vid.v v10, v0.t
55 ; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
56 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
57 ; CHECK-NEXT: vrgatherei16.vv v11, v9, v10, v0.t
58 ; CHECK-NEXT: vmsne.vi v0, v11, 0, v0.t
60 %dst = call <4 x i1> @llvm.experimental.vp.reverse.v4i1(<4 x i1> %src, <4 x i1> %mask, i32 %evl)
; Unmasked vp.reverse of a <4 x i1> predicate (all-ones splat mask).
64 define <4 x i1> @test_vp_reverse_v4i1(<4 x i1> %src, i32 zeroext %evl) {
65 ; CHECK-LABEL: test_vp_reverse_v4i1:
67 ; CHECK-NEXT: addi a1, a0, -1
68 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
69 ; CHECK-NEXT: vid.v v8
70 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
71 ; CHECK-NEXT: vmv.v.i v9, 0
72 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
73 ; CHECK-NEXT: vrsub.vx v8, v8, a1
74 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
75 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
76 ; CHECK-NEXT: vrgatherei16.vv v10, v9, v8
77 ; CHECK-NEXT: vmsne.vi v0, v10, 0
79 %dst = call <4 x i1> @llvm.experimental.vp.reverse.v4i1(<4 x i1> %src, <4 x i1> splat (i1 1), i32 %evl)
; Masked vp.reverse of an <8 x i1> predicate.  The e16 index vector now needs
; LMUL m1 while the i8 data stays at mf2.
84 define <8 x i1> @test_vp_reverse_v8i1_masked(<8 x i1> %src, <8 x i1> %mask, i32 zeroext %evl) {
85 ; CHECK-LABEL: test_vp_reverse_v8i1_masked:
87 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
88 ; CHECK-NEXT: vmv.v.i v9, 0
89 ; CHECK-NEXT: addi a0, a0, -1
90 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
91 ; CHECK-NEXT: vmv1r.v v0, v8
92 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
93 ; CHECK-NEXT: vid.v v10, v0.t
94 ; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
95 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
96 ; CHECK-NEXT: vrgatherei16.vv v11, v9, v10, v0.t
97 ; CHECK-NEXT: vmsne.vi v0, v11, 0, v0.t
99 %dst = call <8 x i1> @llvm.experimental.vp.reverse.v8i1(<8 x i1> %src, <8 x i1> %mask, i32 %evl)
; Unmasked vp.reverse of an <8 x i1> predicate (all-ones splat mask).
103 define <8 x i1> @test_vp_reverse_v8i1(<8 x i1> %src, i32 zeroext %evl) {
104 ; CHECK-LABEL: test_vp_reverse_v8i1:
106 ; CHECK-NEXT: addi a1, a0, -1
107 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
108 ; CHECK-NEXT: vid.v v8
109 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
110 ; CHECK-NEXT: vmv.v.i v9, 0
111 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
112 ; CHECK-NEXT: vrsub.vx v8, v8, a1
113 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
114 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
115 ; CHECK-NEXT: vrgatherei16.vv v10, v9, v8
116 ; CHECK-NEXT: vmsne.vi v0, v10, 0
118 %dst = call <8 x i1> @llvm.experimental.vp.reverse.v8i1(<8 x i1> %src, <8 x i1> splat (i1 1), i32 %evl)
; Masked vp.reverse of a <16 x i1> predicate.  The e16 index vector occupies
; an m2 register group (v10), so the gather destination moves to v12.
123 define <16 x i1> @test_vp_reverse_v16i1_masked(<16 x i1> %src, <16 x i1> %mask, i32 zeroext %evl) {
124 ; CHECK-LABEL: test_vp_reverse_v16i1_masked:
126 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
127 ; CHECK-NEXT: vmv.v.i v9, 0
128 ; CHECK-NEXT: addi a0, a0, -1
129 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
130 ; CHECK-NEXT: vmv1r.v v0, v8
131 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
132 ; CHECK-NEXT: vid.v v10, v0.t
133 ; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
134 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
135 ; CHECK-NEXT: vrgatherei16.vv v12, v9, v10, v0.t
136 ; CHECK-NEXT: vmsne.vi v0, v12, 0, v0.t
138 %dst = call <16 x i1> @llvm.experimental.vp.reverse.v16i1(<16 x i1> %src, <16 x i1> %mask, i32 %evl)
; Unmasked vp.reverse of a <16 x i1> predicate (all-ones splat mask); the
; e16 index vector uses the even-aligned m2 group v8-v9, data widened in v10.
142 define <16 x i1> @test_vp_reverse_v16i1(<16 x i1> %src, i32 zeroext %evl) {
143 ; CHECK-LABEL: test_vp_reverse_v16i1:
145 ; CHECK-NEXT: addi a1, a0, -1
146 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
147 ; CHECK-NEXT: vid.v v8
148 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
149 ; CHECK-NEXT: vmv.v.i v10, 0
150 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
151 ; CHECK-NEXT: vrsub.vx v8, v8, a1
152 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
153 ; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
154 ; CHECK-NEXT: vrgatherei16.vv v11, v10, v8
155 ; CHECK-NEXT: vmsne.vi v0, v11, 0
157 %dst = call <16 x i1> @llvm.experimental.vp.reverse.v16i1(<16 x i1> %src, <16 x i1> splat (i1 1), i32 %evl)
; Declarations of the experimental VP reverse intrinsics exercised above:
; (src vector, per-element mask, i32 evl) -> reversed vector.
161 declare <2 x i1> @llvm.experimental.vp.reverse.v2i1(<2 x i1>,<2 x i1>,i32)
162 declare <4 x i1> @llvm.experimental.vp.reverse.v4i1(<4 x i1>,<4 x i1>,i32)
163 declare <8 x i1> @llvm.experimental.vp.reverse.v8i1(<8 x i1>,<8 x i1>,i32)
164 declare <16 x i1> @llvm.experimental.vp.reverse.v16i1(<16 x i1>,<16 x i1>,i32)