; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
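
; Check SDNode lowering of scalable-vector ashr to RVV shifts: vector-vector
; (vsra.vv), splatted scalar (vsra.vx), and 5-bit immediate (vsra.vi) forms.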

define <vscale x 1 x i8> @vsra_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 1 x i8> %va, %vb
  ret <vscale x 1 x i8> %vc
}
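
; The sext/zext tests below exercise the combine that narrows
; (trunc (ashr (sext %x), (zext %y))) back to an element-width shift: shifting
; the sign-extended value right by SEW-1 or more truncates to the same result
; as shifting by SEW-1, so the shift amount is clamped with vmin.vx (7 for e8,
; 15 for e16) instead of widening to e32.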
define <vscale x 1 x i8> @vsra_vv_nxv1i8_sext_zext(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv1i8_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmin.vx v9, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 1 x i8> %va to <vscale x 1 x i32>
  %zexted_vb = zext <vscale x 1 x i8> %va to <vscale x 1 x i32>
  %expand = ashr <vscale x 1 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 1 x i32> %expand to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vsra_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %vc = ashr <vscale x 1 x i8> %va, %splat
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vsra_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 1 x i8> %va, splat (i8 6)
  ret <vscale x 1 x i8> %vc
}

define <vscale x 2 x i8> @vsra_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i8> %va, %vb
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vsra_vv_nxv2i8_sext_zext(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv2i8_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmin.vx v9, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 2 x i8> %va to <vscale x 2 x i32>
  %zexted_vb = zext <vscale x 2 x i8> %va to <vscale x 2 x i32>
  %expand = ashr <vscale x 2 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 2 x i32> %expand to <vscale x 2 x i8>
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vsra_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %vc = ashr <vscale x 2 x i8> %va, %splat
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vsra_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i8> %va, splat (i8 6)
  ret <vscale x 2 x i8> %vc
}

define <vscale x 4 x i8> @vsra_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i8> %va, %vb
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vsra_vv_nxv4i8_sext_zext(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i8_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmin.vx v9, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 4 x i8> %va to <vscale x 4 x i32>
  %zexted_vb = zext <vscale x 4 x i8> %va to <vscale x 4 x i32>
  %expand = ashr <vscale x 4 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 4 x i32> %expand to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vsra_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %vc = ashr <vscale x 4 x i8> %va, %splat
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vsra_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i8> %va, splat (i8 6)
  ret <vscale x 4 x i8> %vc
}

define <vscale x 8 x i8> @vsra_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vsra_vv_nxv8i8_sext_zext(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i8_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmin.vx v9, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 8 x i8> %va to <vscale x 8 x i32>
  %zexted_vb = zext <vscale x 8 x i8> %va to <vscale x 8 x i32>
  %expand = ashr <vscale x 8 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 8 x i32> %expand to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vsra_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = ashr <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vsra_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i8> %va, splat (i8 6)
  ret <vscale x 8 x i8> %vc
}

define <vscale x 16 x i8> @vsra_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 16 x i8> %va, %vb
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vsra_vv_nxv16i8_sext_zext(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv16i8_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmin.vx v10, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v10
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 16 x i8> %va to <vscale x 16 x i32>
  %zexted_vb = zext <vscale x 16 x i8> %va to <vscale x 16 x i32>
  %expand = ashr <vscale x 16 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 16 x i32> %expand to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vsra_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %vc = ashr <vscale x 16 x i8> %va, %splat
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vsra_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv16i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 16 x i8> %va, splat (i8 6)
  ret <vscale x 16 x i8> %vc
}

define <vscale x 32 x i8> @vsra_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 32 x i8> %va, %vb
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vsra_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %vc = ashr <vscale x 32 x i8> %va, %splat
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vsra_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv32i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 32 x i8> %va, splat (i8 6)
  ret <vscale x 32 x i8> %vc
}

define <vscale x 64 x i8> @vsra_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 64 x i8> %va, %vb
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vsra_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %vc = ashr <vscale x 64 x i8> %va, %splat
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vsra_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv64i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 64 x i8> %va, splat (i8 6)
  ret <vscale x 64 x i8> %vc
}

define <vscale x 1 x i16> @vsra_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 1 x i16> %va, %vb
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vsra_vv_nxv1i16_sext_zext(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv1i16_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 15
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmin.vx v9, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 1 x i16> %va to <vscale x 1 x i32>
  %zexted_vb = zext <vscale x 1 x i16> %va to <vscale x 1 x i32>
  %expand = ashr <vscale x 1 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 1 x i32> %expand to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vsra_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %vc = ashr <vscale x 1 x i16> %va, %splat
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vsra_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv1i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 1 x i16> %va, splat (i16 6)
  ret <vscale x 1 x i16> %vc
}

define <vscale x 2 x i16> @vsra_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i16> %va, %vb
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vsra_vv_nxv2i16_sext_zext(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv2i16_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 15
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmin.vx v9, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 2 x i16> %va to <vscale x 2 x i32>
  %zexted_vb = zext <vscale x 2 x i16> %va to <vscale x 2 x i32>
  %expand = ashr <vscale x 2 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 2 x i32> %expand to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vsra_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %vc = ashr <vscale x 2 x i16> %va, %splat
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vsra_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i16> %va, splat (i16 6)
  ret <vscale x 2 x i16> %vc
}

define <vscale x 4 x i16> @vsra_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i16> %va, %vb
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vsra_vv_nxv4i16_sext_zext(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i16_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 15
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmin.vx v9, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 4 x i16> %va to <vscale x 4 x i32>
  %zexted_vb = zext <vscale x 4 x i16> %va to <vscale x 4 x i32>
  %expand = ashr <vscale x 4 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 4 x i32> %expand to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vsra_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %vc = ashr <vscale x 4 x i16> %va, %splat
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vsra_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv4i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i16> %va, splat (i16 6)
  ret <vscale x 4 x i16> %vc
}

define <vscale x 8 x i16> @vsra_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i16> %va, %vb
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vsra_vv_nxv8i16_sext_zext(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i16_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 15
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmin.vx v10, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v10
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 8 x i16> %va to <vscale x 8 x i32>
  %zexted_vb = zext <vscale x 8 x i16> %va to <vscale x 8 x i32>
  %expand = ashr <vscale x 8 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 8 x i32> %expand to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vsra_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = ashr <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vsra_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i16> %va, splat (i16 6)
  ret <vscale x 8 x i16> %vc
}

define <vscale x 16 x i16> @vsra_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 16 x i16> %va, %vb
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vsra_vv_nxv16i16_sext_zext(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv16i16_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 15
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmin.vx v12, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v12
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 16 x i16> %va to <vscale x 16 x i32>
  %zexted_vb = zext <vscale x 16 x i16> %va to <vscale x 16 x i32>
  %expand = ashr <vscale x 16 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 16 x i32> %expand to <vscale x 16 x i16>
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vsra_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %vc = ashr <vscale x 16 x i16> %va, %splat
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vsra_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv16i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 16 x i16> %va, splat (i16 6)
  ret <vscale x 16 x i16> %vc
}

define <vscale x 32 x i16> @vsra_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 32 x i16> %va, %vb
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vsra_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %vc = ashr <vscale x 32 x i16> %va, %splat
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vsra_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv32i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 32 x i16> %va, splat (i16 6)
  ret <vscale x 32 x i16> %vc
}

define <vscale x 1 x i32> @vsra_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vsra_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 1 x i32> %va, %vb
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vsra_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vsra_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %vc = ashr <vscale x 1 x i32> %va, %splat
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vsra_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vsra_vi_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 1 x i32> %va, splat (i32 31)
  ret <vscale x 1 x i32> %vc
}

define <vscale x 2 x i32> @vsra_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vsra_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i32> %va, %vb
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vsra_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vsra_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %vc = ashr <vscale x 2 x i32> %va, %splat
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vsra_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vsra_vi_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i32> %va, splat (i32 31)
  ret <vscale x 2 x i32> %vc
}

define <vscale x 4 x i32> @vsra_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i32> %va, %vb
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vsra_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vsra_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %vc = ashr <vscale x 4 x i32> %va, %splat
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vsra_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vsra_vi_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i32> %va, splat (i32 31)
  ret <vscale x 4 x i32> %vc
}

define <vscale x 8 x i32> @vsra_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i32> %va, %vb
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vsra_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vsra_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vc = ashr <vscale x 8 x i32> %va, %splat
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vsra_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vsra_vi_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i32> %va, splat (i32 31)
  ret <vscale x 8 x i32> %vc
}

define <vscale x 16 x i32> @vsra_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vsra_vv_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 16 x i32> %va, %vb
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vsra_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vsra_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %vc = ashr <vscale x 16 x i32> %va, %splat
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vsra_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vsra_vi_nxv16i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 16 x i32> %va, splat (i32 31)
  ret <vscale x 16 x i32> %vc
}

define <vscale x 1 x i64> @vsra_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vsra_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 1 x i64> %va, %vb
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vsra_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vsra_vx_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %vc = ashr <vscale x 1 x i64> %va, %splat
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vsra_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv1i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 1 x i64> %va, splat (i64 31)
  ret <vscale x 1 x i64> %vc
}
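
; A shift amount of 32 does not fit in the 5-bit immediate of vsra.vi (0..31),
; so it is materialized with li and vsra.vx is used instead.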
define <vscale x 1 x i64> @vsra_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv1i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 1 x i64> %va, splat (i64 32)
  ret <vscale x 1 x i64> %vc
}

define <vscale x 2 x i64> @vsra_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vsra_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i64> %va, %vb
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vsra_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vsra_vx_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %vc = ashr <vscale x 2 x i64> %va, %splat
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vsra_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv2i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i64> %va, splat (i64 31)
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vsra_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv2i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i64> %va, splat (i64 32)
  ret <vscale x 2 x i64> %vc
}

define <vscale x 4 x i64> @vsra_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i64> %va, %vb
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vsra_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vsra_vx_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %vc = ashr <vscale x 4 x i64> %va, %splat
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vsra_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv4i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i64> %va, splat (i64 31)
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vsra_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv4i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i64> %va, splat (i64 32)
  ret <vscale x 4 x i64> %vc
}

define <vscale x 8 x i64> @vsra_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i64> %va, %vb
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vsra_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vsra_vx_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %vc = ashr <vscale x 8 x i64> %va, %splat
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vsra_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i64> %va, splat (i64 31)
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vsra_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv8i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i64> %va, splat (i64 32)
  ret <vscale x 8 x i64> %vc
}
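
; Shifting by (select %mask, %amount, 0) folds into a masked vsra: inactive
; lanes shift by zero and so keep %va, which is already in the destination
; register, hence the mask-undisturbed (mu) policy in vsetvli.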
define <vscale x 8 x i32> @vsra_vv_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vsra_vv_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vsra.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> zeroinitializer
  %vc = ashr <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vsra_vx_mask_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vsra_vx_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> zeroinitializer
  %vc = ashr <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vsra_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vsra_vi_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vsra.vi v8, v8, 31, v0.t
; CHECK-NEXT:    ret
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 31), <vscale x 8 x i32> zeroinitializer
  %vc = ashr <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}