; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
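
; Test that codegen for the vssra (fixed-point scaling shift right arithmetic)
; intrinsics selects the expected vsetvli/vssra sequence. In each call below,
; the first vector operand is the passthru (poison, i.e. no merge), the
; trailing "i64 0" immediate is the fixed-point rounding mode (0 = rnu,
; round-to-nearest-up), which is materialized as "csrwi vxrm, 0", and the
; final i64 operand is the AVL that feeds vsetvli.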
define <vscale x 1 x i8> @test_vssra_vv_i8mf8(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 0, i64 %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 immarg, i64)

define <vscale x 1 x i8> @test_vssra_vx_i8mf8(<vscale x 1 x i8> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8>, <vscale x 1 x i8>, i64, i64 immarg, i64)

define <vscale x 2 x i8> @test_vssra_vv_i8mf4(<vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i64 0, i64 %vl)
  ret <vscale x 2 x i8> %0
}

declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 immarg, i64)

define <vscale x 2 x i8> @test_vssra_vx_i8mf4(<vscale x 2 x i8> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 2 x i8> %0
}

declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8>, <vscale x 2 x i8>, i64, i64 immarg, i64)

define <vscale x 4 x i8> @test_vssra_vv_i8mf2(<vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i64 0, i64 %vl)
  ret <vscale x 4 x i8> %0
}

declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 immarg, i64)

define <vscale x 4 x i8> @test_vssra_vx_i8mf2(<vscale x 4 x i8> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 4 x i8> %0
}

declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8>, <vscale x 4 x i8>, i64, i64 immarg, i64)

define <vscale x 8 x i8> @test_vssra_vv_i8m1(<vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i64 0, i64 %vl)
  ret <vscale x 8 x i8> %0
}

declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 immarg, i64)

define <vscale x 8 x i8> @test_vssra_vx_i8m1(<vscale x 8 x i8> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 8 x i8> %0
}

declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, i64, i64 immarg, i64)

define <vscale x 16 x i8> @test_vssra_vv_i8m2(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i64 0, i64 %vl)
  ret <vscale x 16 x i8> %0
}

declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 immarg, i64)

define <vscale x 16 x i8> @test_vssra_vx_i8m2(<vscale x 16 x i8> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 16 x i8> %0
}

declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8>, <vscale x 16 x i8>, i64, i64 immarg, i64)

define <vscale x 32 x i8> @test_vssra_vv_i8m4(<vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i64 0, i64 %vl)
  ret <vscale x 32 x i8> %0
}

declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, i64 immarg, i64)

define <vscale x 32 x i8> @test_vssra_vx_i8m4(<vscale x 32 x i8> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 32 x i8> %0
}

declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, i64, i64 immarg, i64)

define <vscale x 64 x i8> @test_vssra_vv_i8m8(<vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i8m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i64 0, i64 %vl)
  ret <vscale x 64 x i8> %0
}

declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, i64 immarg, i64)

define <vscale x 64 x i8> @test_vssra_vx_i8m8(<vscale x 64 x i8> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i8m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 64 x i8> %0
}

declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8>, <vscale x 64 x i8>, i64, i64 immarg, i64)

define <vscale x 1 x i16> @test_vssra_vv_i16mf4(<vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i64 0, i64 %vl)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 immarg, i64)

define <vscale x 1 x i16> @test_vssra_vx_i16mf4(<vscale x 1 x i16> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, i64, i64 immarg, i64)

define <vscale x 2 x i16> @test_vssra_vv_i16mf2(<vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i64 0, i64 %vl)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 immarg, i64)

define <vscale x 2 x i16> @test_vssra_vx_i16mf2(<vscale x 2 x i16> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16>, <vscale x 2 x i16>, i64, i64 immarg, i64)

define <vscale x 4 x i16> @test_vssra_vv_i16m1(<vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i64 0, i64 %vl)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 immarg, i64)

define <vscale x 4 x i16> @test_vssra_vx_i16m1(<vscale x 4 x i16> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16>, <vscale x 4 x i16>, i64, i64 immarg, i64)

define <vscale x 8 x i16> @test_vssra_vv_i16m2(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i64 0, i64 %vl)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 immarg, i64)

define <vscale x 8 x i16> @test_vssra_vx_i16m2(<vscale x 8 x i16> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16>, <vscale x 8 x i16>, i64, i64 immarg, i64)

define <vscale x 16 x i16> @test_vssra_vv_i16m4(<vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i64 0, i64 %vl)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, i64 immarg, i64)

define <vscale x 16 x i16> @test_vssra_vx_i16m4(<vscale x 16 x i16> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16>, <vscale x 16 x i16>, i64, i64 immarg, i64)

define <vscale x 32 x i16> @test_vssra_vv_i16m8(<vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i16m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i64 0, i64 %vl)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, i64 immarg, i64)

define <vscale x 32 x i16> @test_vssra_vx_i16m8(<vscale x 32 x i16> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i16m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, i64, i64 immarg, i64)

define <vscale x 1 x i32> @test_vssra_vv_i32mf2(<vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i64 0, i64 %vl)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 immarg, i64)

define <vscale x 1 x i32> @test_vssra_vx_i32mf2(<vscale x 1 x i32> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32>, <vscale x 1 x i32>, i64, i64 immarg, i64)

define <vscale x 2 x i32> @test_vssra_vv_i32m1(<vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i64 0, i64 %vl)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 immarg, i64)

define <vscale x 2 x i32> @test_vssra_vx_i32m1(<vscale x 2 x i32> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, i64, i64 immarg, i64)

define <vscale x 4 x i32> @test_vssra_vv_i32m2(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i64 0, i64 %vl)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 immarg, i64)

define <vscale x 4 x i32> @test_vssra_vx_i32m2(<vscale x 4 x i32> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32>, <vscale x 4 x i32>, i64, i64 immarg, i64)

define <vscale x 8 x i32> @test_vssra_vv_i32m4(<vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i64 0, i64 %vl)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, i64 immarg, i64)

define <vscale x 8 x i32> @test_vssra_vx_i32m4(<vscale x 8 x i32> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32>, <vscale x 8 x i32>, i64, i64 immarg, i64)

define <vscale x 16 x i32> @test_vssra_vv_i32m8(<vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i64 0, i64 %vl)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, i64 immarg, i64)

define <vscale x 16 x i32> @test_vssra_vx_i32m8(<vscale x 16 x i32> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32>, <vscale x 16 x i32>, i64, i64 immarg, i64)

define <vscale x 1 x i64> @test_vssra_vv_i64m1(<vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i64 0, i64 %vl)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 immarg, i64)

define <vscale x 1 x i64> @test_vssra_vx_i64m1(<vscale x 1 x i64> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64, i64 immarg, i64)

define <vscale x 2 x i64> @test_vssra_vv_i64m2(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i64 0, i64 %vl)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 immarg, i64)

define <vscale x 2 x i64> @test_vssra_vx_i64m2(<vscale x 2 x i64> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i64, i64 immarg, i64)

define <vscale x 4 x i64> @test_vssra_vv_i64m4(<vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i64 0, i64 %vl)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, i64 immarg, i64)

define <vscale x 4 x i64> @test_vssra_vx_i64m4(<vscale x 4 x i64> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64>, <vscale x 4 x i64>, i64, i64 immarg, i64)

define <vscale x 8 x i64> @test_vssra_vv_i64m8(<vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i64 0, i64 %vl)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, i64 immarg, i64)

define <vscale x 8 x i64> @test_vssra_vx_i64m8(<vscale x 8 x i64> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, i64 %shift, i64 0, i64 %vl)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64>, <vscale x 8 x i64>, i64, i64 immarg, i64)

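; The _m variants below exercise the masked intrinsics: the mask is an extra
; <vscale x N x i1> operand (assigned to v0, hence the "v0.t" suffix on the
; generated instruction), and the trailing "i64 3" immediate is the policy
; operand, i.e. tail agnostic and mask agnostic, matching the "ta, ma" flags
; on the generated vsetvli.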
define <vscale x 1 x i8> @test_vssra_vv_i8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i8mf8_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 1 x i8> @test_vssra_vx_i8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i8mf8_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, i64 %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8>, <vscale x 1 x i8>, i64, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 2 x i8> @test_vssra_vv_i8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i8mf4_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 2 x i8> %0
}

declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 2 x i8> @test_vssra_vx_i8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i8mf4_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, i64 %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 2 x i8> %0
}

declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8>, <vscale x 2 x i8>, i64, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 4 x i8> @test_vssra_vv_i8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i8mf2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 4 x i8> %0
}

declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 4 x i8> @test_vssra_vx_i8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i8mf2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, i64 %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 4 x i8> %0
}

declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8>, <vscale x 4 x i8>, i64, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 8 x i8> @test_vssra_vv_i8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i8m1_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 8 x i8> %0
}

declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 8 x i8> @test_vssra_vx_i8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i8m1_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, i64 %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 8 x i8> %0
}

declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, i64, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 16 x i8> @test_vssra_vv_i8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i8m2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 16 x i8> %0
}

declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 16 x i8> @test_vssra_vx_i8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i8m2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, i64 %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 16 x i8> %0
}

declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8>, <vscale x 16 x i8>, i64, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 32 x i8> @test_vssra_vv_i8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i8m4_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, <vscale x 32 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 32 x i8> %0
}

declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 32 x i8> @test_vssra_vx_i8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i8m4_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, i64 %shift, <vscale x 32 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 32 x i8> %0
}

declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, i64, <vscale x 32 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 64 x i8> @test_vssra_vv_i8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i8m8_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, <vscale x 64 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 64 x i8> %0
}

declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 64 x i8> @test_vssra_vx_i8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i8m8_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, i64 %shift, <vscale x 64 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 64 x i8> %0
}

declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8>, <vscale x 64 x i8>, i64, <vscale x 64 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 1 x i16> @test_vssra_vv_i16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i16mf4_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 1 x i16> @test_vssra_vx_i16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i16mf4_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, i64 %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, i64, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 2 x i16> @test_vssra_vv_i16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i16mf2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 2 x i16> @test_vssra_vx_i16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i16mf2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, i64 %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16>, <vscale x 2 x i16>, i64, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 4 x i16> @test_vssra_vv_i16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i16m1_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 4 x i16> @test_vssra_vx_i16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i16m1_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, i64 %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16>, <vscale x 4 x i16>, i64, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 8 x i16> @test_vssra_vv_i16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i16m2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 8 x i16> @test_vssra_vx_i16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i16m2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, i64 %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16>, <vscale x 8 x i16>, i64, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 16 x i16> @test_vssra_vv_i16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i16m4_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 16 x i16> @test_vssra_vx_i16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i16m4_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, i64 %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16>, <vscale x 16 x i16>, i64, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 32 x i16> @test_vssra_vv_i16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i16m8_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, <vscale x 32 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 32 x i16> @test_vssra_vx_i16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i16m8_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, i64 %shift, <vscale x 32 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, i64, <vscale x 32 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 1 x i32> @test_vssra_vv_i32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i32mf2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 1 x i32> @test_vssra_vx_i32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i32mf2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, i64 %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32>, <vscale x 1 x i32>, i64, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 2 x i32> @test_vssra_vv_i32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i32m1_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 2 x i32> @test_vssra_vx_i32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i32m1_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, i64 %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, i64, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 4 x i32> @test_vssra_vv_i32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i32m2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 4 x i32> @test_vssra_vx_i32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i32m2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, i64 %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32>, <vscale x 4 x i32>, i64, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 8 x i32> @test_vssra_vv_i32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i32m4_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 8 x i32> @test_vssra_vx_i32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i32m4_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, i64 %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32>, <vscale x 8 x i32>, i64, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 16 x i32> @test_vssra_vv_i32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i32m8_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 16 x i32> @test_vssra_vx_i32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i32m8_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, i64 %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32>, <vscale x 16 x i32>, i64, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 1 x i64> @test_vssra_vv_i64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i64m1_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 1 x i64> @test_vssra_vx_i64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i64m1_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, i64 %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 2 x i64> @test_vssra_vv_i64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i64m2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 2 x i64> @test_vssra_vx_i64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i64m2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, i64 %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i64, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 4 x i64> @test_vssra_vv_i64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i64m4_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 4 x i64> @test_vssra_vx_i64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i64m4_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, i64 %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64>, <vscale x 4 x i64>, i64, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 8 x i64> @test_vssra_vv_i64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vv_i64m8_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 8 x i64> @test_vssra_vx_i64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, i64 %shift, i64 %vl) {
; CHECK-LABEL: test_vssra_vx_i64m8_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, i64 %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64>, <vscale x 8 x i64>, i64, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)