; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs -riscv-v-vector-bits-min=128 \
; RUN:   < %s | FileCheck %s

define <2 x i1> @test_vp_reverse_v2i1_masked(<2 x i1> %src, <2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v2i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vid.v v10, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmsne.vi v0, v11, 0, v0.t
; CHECK-NEXT:    ret
  %dst = call <2 x i1> @llvm.experimental.vp.reverse.v2i1(<2 x i1> %src, <2 x i1> %mask, i32 %evl)
  ret <2 x i1> %dst
}

define <2 x i1> @test_vp_reverse_v2i1(<2 x i1> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vx v8, v8, a1
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vrgatherei16.vv v10, v9, v8
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    ret
  %head = insertelement <2 x i1> undef, i1 1, i32 0
  %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer

  %dst = call <2 x i1> @llvm.experimental.vp.reverse.v2i1(<2 x i1> %src, <2 x i1> %allones, i32 %evl)
  ret <2 x i1> %dst
}

define <4 x i1> @test_vp_reverse_v4i1_masked(<4 x i1> %src, <4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v4i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vid.v v10, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmsne.vi v0, v11, 0, v0.t
; CHECK-NEXT:    ret
  %dst = call <4 x i1> @llvm.experimental.vp.reverse.v4i1(<4 x i1> %src, <4 x i1> %mask, i32 %evl)
  ret <4 x i1> %dst
}

define <4 x i1> @test_vp_reverse_v4i1(<4 x i1> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vx v8, v8, a1
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vrgatherei16.vv v10, v9, v8
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    ret
  %head = insertelement <4 x i1> undef, i1 1, i32 0
  %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer

  %dst = call <4 x i1> @llvm.experimental.vp.reverse.v4i1(<4 x i1> %src, <4 x i1> %allones, i32 %evl)
  ret <4 x i1> %dst
}

define <8 x i1> @test_vp_reverse_v8i1_masked(<8 x i1> %src, <8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v8i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vid.v v10, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmsne.vi v0, v11, 0, v0.t
; CHECK-NEXT:    ret
  %dst = call <8 x i1> @llvm.experimental.vp.reverse.v8i1(<8 x i1> %src, <8 x i1> %mask, i32 %evl)
  ret <8 x i1> %dst
}

define <8 x i1> @test_vp_reverse_v8i1(<8 x i1> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vx v8, v8, a1
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vrgatherei16.vv v10, v9, v8
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    ret
  %head = insertelement <8 x i1> undef, i1 1, i32 0
  %allones = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer

  %dst = call <8 x i1> @llvm.experimental.vp.reverse.v8i1(<8 x i1> %src, <8 x i1> %allones, i32 %evl)
  ret <8 x i1> %dst
}

define <16 x i1> @test_vp_reverse_v16i1_masked(<16 x i1> %src, <16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v16i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vid.v v10, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v12, v9, v10, v0.t
; CHECK-NEXT:    vmsne.vi v0, v12, 0, v0.t
; CHECK-NEXT:    ret
  %dst = call <16 x i1> @llvm.experimental.vp.reverse.v16i1(<16 x i1> %src, <16 x i1> %mask, i32 %evl)
  ret <16 x i1> %dst
}

define <16 x i1> @test_vp_reverse_v16i1(<16 x i1> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vx v8, v8, a1
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmerge.vim v10, v10, 1, v0
; CHECK-NEXT:    vrgatherei16.vv v11, v10, v8
; CHECK-NEXT:    vmsne.vi v0, v11, 0
; CHECK-NEXT:    ret
  %head = insertelement <16 x i1> undef, i1 1, i32 0
  %allones = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer

  %dst = call <16 x i1> @llvm.experimental.vp.reverse.v16i1(<16 x i1> %src, <16 x i1> %allones, i32 %evl)
  ret <16 x i1> %dst
}

declare <2 x i1> @llvm.experimental.vp.reverse.v2i1(<2 x i1>,<2 x i1>,i32)
declare <4 x i1> @llvm.experimental.vp.reverse.v4i1(<4 x i1>,<4 x i1>,i32)
declare <8 x i1> @llvm.experimental.vp.reverse.v8i1(<8 x i1>,<8 x i1>,i32)
declare <16 x i1> @llvm.experimental.vp.reverse.v16i1(<16 x i1>,<16 x i1>,i32)