1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
3 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
5 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
7 declare <8 x i7> @llvm.vp.ashr.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
9 define <8 x i7> @vsra_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
10 ; CHECK-LABEL: vsra_vv_v8i7:
12 ; CHECK-NEXT: li a1, 127
13 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
14 ; CHECK-NEXT: vand.vx v9, v9, a1, v0.t
15 ; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
16 ; CHECK-NEXT: vsra.vi v8, v8, 1, v0.t
17 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
19 %v = call <8 x i7> @llvm.vp.ashr.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
23 declare <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
25 define <2 x i8> @vsra_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
26 ; CHECK-LABEL: vsra_vv_v2i8:
28 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
29 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
31 %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
35 define <2 x i8> @vsra_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) {
36 ; CHECK-LABEL: vsra_vv_v2i8_unmasked:
38 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
39 ; CHECK-NEXT: vsra.vv v8, v8, v9
41 %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
45 define <2 x i8> @vsra_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
46 ; CHECK-LABEL: vsra_vx_v2i8:
48 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
49 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
51 %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
52 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
53 %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
57 define <2 x i8> @vsra_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
58 ; CHECK-LABEL: vsra_vx_v2i8_unmasked:
60 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
61 ; CHECK-NEXT: vsra.vx v8, v8, a0
63 %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
64 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
65 %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
69 define <2 x i8> @vsra_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
70 ; CHECK-LABEL: vsra_vi_v2i8:
72 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
73 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
75 %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> splat (i8 5), <2 x i1> %m, i32 %evl)
79 define <2 x i8> @vsra_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
80 ; CHECK-LABEL: vsra_vi_v2i8_unmasked:
82 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
83 ; CHECK-NEXT: vsra.vi v8, v8, 5
85 %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> splat (i8 5), <2 x i1> splat (i1 true), i32 %evl)
89 declare <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)
91 define <4 x i8> @vsra_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
92 ; CHECK-LABEL: vsra_vv_v4i8:
94 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
95 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
97 %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
101 define <4 x i8> @vsra_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) {
102 ; CHECK-LABEL: vsra_vv_v4i8_unmasked:
104 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
105 ; CHECK-NEXT: vsra.vv v8, v8, v9
107 %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
111 define <4 x i8> @vsra_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
112 ; CHECK-LABEL: vsra_vx_v4i8:
114 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
115 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
117 %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
118 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
119 %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
123 define <4 x i8> @vsra_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
124 ; CHECK-LABEL: vsra_vx_v4i8_unmasked:
126 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
127 ; CHECK-NEXT: vsra.vx v8, v8, a0
129 %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
130 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
131 %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
135 define <4 x i8> @vsra_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
136 ; CHECK-LABEL: vsra_vi_v4i8:
138 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
139 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
141 %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> splat (i8 5), <4 x i1> %m, i32 %evl)
145 define <4 x i8> @vsra_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
146 ; CHECK-LABEL: vsra_vi_v4i8_unmasked:
148 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
149 ; CHECK-NEXT: vsra.vi v8, v8, 5
151 %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> splat (i8 5), <4 x i1> splat (i1 true), i32 %evl)
155 declare <7 x i8> @llvm.vp.ashr.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32)
157 define <7 x i8> @vsra_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) {
158 ; CHECK-LABEL: vsra_vv_v7i8:
160 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
161 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
163 %v = call <7 x i8> @llvm.vp.ashr.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl)
167 declare <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
169 define <8 x i8> @vsra_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
170 ; CHECK-LABEL: vsra_vv_v8i8:
172 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
173 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
175 %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
179 define <8 x i8> @vsra_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) {
180 ; CHECK-LABEL: vsra_vv_v8i8_unmasked:
182 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
183 ; CHECK-NEXT: vsra.vv v8, v8, v9
185 %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
189 define <8 x i8> @vsra_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
190 ; CHECK-LABEL: vsra_vx_v8i8:
192 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
193 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
195 %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
196 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
197 %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
201 define <8 x i8> @vsra_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
202 ; CHECK-LABEL: vsra_vx_v8i8_unmasked:
204 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
205 ; CHECK-NEXT: vsra.vx v8, v8, a0
207 %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
208 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
209 %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
213 define <8 x i8> @vsra_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
214 ; CHECK-LABEL: vsra_vi_v8i8:
216 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
217 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
219 %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> splat (i8 5), <8 x i1> %m, i32 %evl)
223 define <8 x i8> @vsra_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
224 ; CHECK-LABEL: vsra_vi_v8i8_unmasked:
226 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
227 ; CHECK-NEXT: vsra.vi v8, v8, 5
229 %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> splat (i8 5), <8 x i1> splat (i1 true), i32 %evl)
233 declare <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32)
235 define <16 x i8> @vsra_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
236 ; CHECK-LABEL: vsra_vv_v16i8:
238 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
239 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
241 %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
245 define <16 x i8> @vsra_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) {
246 ; CHECK-LABEL: vsra_vv_v16i8_unmasked:
248 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
249 ; CHECK-NEXT: vsra.vv v8, v8, v9
251 %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
255 define <16 x i8> @vsra_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
256 ; CHECK-LABEL: vsra_vx_v16i8:
258 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
259 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
261 %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
262 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
263 %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
267 define <16 x i8> @vsra_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) {
268 ; CHECK-LABEL: vsra_vx_v16i8_unmasked:
270 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
271 ; CHECK-NEXT: vsra.vx v8, v8, a0
273 %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
274 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
275 %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
279 define <16 x i8> @vsra_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
280 ; CHECK-LABEL: vsra_vi_v16i8:
282 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
283 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
285 %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> splat (i8 5), <16 x i1> %m, i32 %evl)
289 define <16 x i8> @vsra_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
290 ; CHECK-LABEL: vsra_vi_v16i8_unmasked:
292 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
293 ; CHECK-NEXT: vsra.vi v8, v8, 5
295 %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> splat (i8 5), <16 x i1> splat (i1 true), i32 %evl)
299 declare <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32)
301 define <2 x i16> @vsra_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
302 ; CHECK-LABEL: vsra_vv_v2i16:
304 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
305 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
307 %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
311 define <2 x i16> @vsra_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) {
312 ; CHECK-LABEL: vsra_vv_v2i16_unmasked:
314 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
315 ; CHECK-NEXT: vsra.vv v8, v8, v9
317 %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
321 define <2 x i16> @vsra_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
322 ; CHECK-LABEL: vsra_vx_v2i16:
324 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
325 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
327 %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
328 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
329 %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
333 define <2 x i16> @vsra_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) {
334 ; CHECK-LABEL: vsra_vx_v2i16_unmasked:
336 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
337 ; CHECK-NEXT: vsra.vx v8, v8, a0
339 %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
340 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
341 %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
345 define <2 x i16> @vsra_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
346 ; CHECK-LABEL: vsra_vi_v2i16:
348 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
349 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
351 %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> splat (i16 5), <2 x i1> %m, i32 %evl)
355 define <2 x i16> @vsra_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
356 ; CHECK-LABEL: vsra_vi_v2i16_unmasked:
358 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
359 ; CHECK-NEXT: vsra.vi v8, v8, 5
361 %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> splat (i16 5), <2 x i1> splat (i1 true), i32 %evl)
365 declare <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32)
367 define <4 x i16> @vsra_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
368 ; CHECK-LABEL: vsra_vv_v4i16:
370 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
371 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
373 %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
377 define <4 x i16> @vsra_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) {
378 ; CHECK-LABEL: vsra_vv_v4i16_unmasked:
380 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
381 ; CHECK-NEXT: vsra.vv v8, v8, v9
383 %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
387 define <4 x i16> @vsra_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
388 ; CHECK-LABEL: vsra_vx_v4i16:
390 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
391 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
393 %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
394 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
395 %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
399 define <4 x i16> @vsra_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) {
400 ; CHECK-LABEL: vsra_vx_v4i16_unmasked:
402 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
403 ; CHECK-NEXT: vsra.vx v8, v8, a0
405 %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
406 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
407 %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
411 define <4 x i16> @vsra_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
412 ; CHECK-LABEL: vsra_vi_v4i16:
414 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
415 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
417 %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> splat (i16 5), <4 x i1> %m, i32 %evl)
421 define <4 x i16> @vsra_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
422 ; CHECK-LABEL: vsra_vi_v4i16_unmasked:
424 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
425 ; CHECK-NEXT: vsra.vi v8, v8, 5
427 %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> splat (i16 5), <4 x i1> splat (i1 true), i32 %evl)
431 declare <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32)
433 define <8 x i16> @vsra_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
434 ; CHECK-LABEL: vsra_vv_v8i16:
436 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
437 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
439 %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
443 define <8 x i16> @vsra_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) {
444 ; CHECK-LABEL: vsra_vv_v8i16_unmasked:
446 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
447 ; CHECK-NEXT: vsra.vv v8, v8, v9
449 %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
453 define <8 x i16> @vsra_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
454 ; CHECK-LABEL: vsra_vx_v8i16:
456 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
457 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
459 %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
460 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
461 %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
465 define <8 x i16> @vsra_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) {
466 ; CHECK-LABEL: vsra_vx_v8i16_unmasked:
468 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
469 ; CHECK-NEXT: vsra.vx v8, v8, a0
471 %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
472 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
473 %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
477 define <8 x i16> @vsra_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
478 ; CHECK-LABEL: vsra_vi_v8i16:
480 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
481 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
483 %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> splat (i16 5), <8 x i1> %m, i32 %evl)
487 define <8 x i16> @vsra_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
488 ; CHECK-LABEL: vsra_vi_v8i16_unmasked:
490 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
491 ; CHECK-NEXT: vsra.vi v8, v8, 5
493 %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> splat (i16 5), <8 x i1> splat (i1 true), i32 %evl)
497 declare <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32)
499 define <16 x i16> @vsra_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
500 ; CHECK-LABEL: vsra_vv_v16i16:
502 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
503 ; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
505 %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
509 define <16 x i16> @vsra_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) {
510 ; CHECK-LABEL: vsra_vv_v16i16_unmasked:
512 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
513 ; CHECK-NEXT: vsra.vv v8, v8, v10
515 %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
519 define <16 x i16> @vsra_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
520 ; CHECK-LABEL: vsra_vx_v16i16:
522 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
523 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
525 %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
526 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
527 %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
531 define <16 x i16> @vsra_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) {
532 ; CHECK-LABEL: vsra_vx_v16i16_unmasked:
534 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
535 ; CHECK-NEXT: vsra.vx v8, v8, a0
537 %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
538 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
539 %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
543 define <16 x i16> @vsra_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
544 ; CHECK-LABEL: vsra_vi_v16i16:
546 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
547 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
549 %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> splat (i16 5), <16 x i1> %m, i32 %evl)
553 define <16 x i16> @vsra_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
554 ; CHECK-LABEL: vsra_vi_v16i16_unmasked:
556 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
557 ; CHECK-NEXT: vsra.vi v8, v8, 5
559 %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> splat (i16 5), <16 x i1> splat (i1 true), i32 %evl)
563 declare <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32)
565 define <2 x i32> @vsra_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
566 ; CHECK-LABEL: vsra_vv_v2i32:
568 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
569 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
571 %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
575 define <2 x i32> @vsra_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) {
576 ; CHECK-LABEL: vsra_vv_v2i32_unmasked:
578 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
579 ; CHECK-NEXT: vsra.vv v8, v8, v9
581 %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
585 define <2 x i32> @vsra_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
586 ; CHECK-LABEL: vsra_vx_v2i32:
588 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
589 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
591 %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
592 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
593 %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
597 define <2 x i32> @vsra_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) {
598 ; CHECK-LABEL: vsra_vx_v2i32_unmasked:
600 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
601 ; CHECK-NEXT: vsra.vx v8, v8, a0
603 %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
604 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
605 %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
609 define <2 x i32> @vsra_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
610 ; CHECK-LABEL: vsra_vi_v2i32:
612 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
613 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
615 %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> splat (i32 5), <2 x i1> %m, i32 %evl)
619 define <2 x i32> @vsra_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
620 ; CHECK-LABEL: vsra_vi_v2i32_unmasked:
622 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
623 ; CHECK-NEXT: vsra.vi v8, v8, 5
625 %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> splat (i32 5), <2 x i1> splat (i1 true), i32 %evl)
629 declare <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
631 define <4 x i32> @vsra_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
632 ; CHECK-LABEL: vsra_vv_v4i32:
634 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
635 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
637 %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
641 define <4 x i32> @vsra_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) {
642 ; CHECK-LABEL: vsra_vv_v4i32_unmasked:
644 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
645 ; CHECK-NEXT: vsra.vv v8, v8, v9
647 %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
651 define <4 x i32> @vsra_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
652 ; CHECK-LABEL: vsra_vx_v4i32:
654 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
655 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
657 %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
658 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
659 %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
663 define <4 x i32> @vsra_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) {
664 ; CHECK-LABEL: vsra_vx_v4i32_unmasked:
666 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
667 ; CHECK-NEXT: vsra.vx v8, v8, a0
669 %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
670 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
671 %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
675 define <4 x i32> @vsra_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
676 ; CHECK-LABEL: vsra_vi_v4i32:
678 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
679 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
681 %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> splat (i32 5), <4 x i1> %m, i32 %evl)
685 define <4 x i32> @vsra_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
686 ; CHECK-LABEL: vsra_vi_v4i32_unmasked:
688 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
689 ; CHECK-NEXT: vsra.vi v8, v8, 5
691 %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> splat (i32 5), <4 x i1> splat (i1 true), i32 %evl)
695 declare <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
697 define <8 x i32> @vsra_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
698 ; CHECK-LABEL: vsra_vv_v8i32:
700 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
701 ; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
703 %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
707 define <8 x i32> @vsra_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) {
708 ; CHECK-LABEL: vsra_vv_v8i32_unmasked:
710 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
711 ; CHECK-NEXT: vsra.vv v8, v8, v10
713 %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
717 define <8 x i32> @vsra_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
718 ; CHECK-LABEL: vsra_vx_v8i32:
720 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
721 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
723 %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
724 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
725 %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
729 define <8 x i32> @vsra_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) {
730 ; CHECK-LABEL: vsra_vx_v8i32_unmasked:
732 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
733 ; CHECK-NEXT: vsra.vx v8, v8, a0
735 %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
736 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
737 %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
741 define <8 x i32> @vsra_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
742 ; CHECK-LABEL: vsra_vi_v8i32:
744 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
745 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
747 %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> splat (i32 5), <8 x i1> %m, i32 %evl)
751 define <8 x i32> @vsra_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
752 ; CHECK-LABEL: vsra_vi_v8i32_unmasked:
754 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
755 ; CHECK-NEXT: vsra.vi v8, v8, 5
757 %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> splat (i32 5), <8 x i1> splat (i1 true), i32 %evl)
761 declare <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32)
763 define <16 x i32> @vsra_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
764 ; CHECK-LABEL: vsra_vv_v16i32:
766 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
767 ; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t
769 %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
773 define <16 x i32> @vsra_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) {
774 ; CHECK-LABEL: vsra_vv_v16i32_unmasked:
776 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
777 ; CHECK-NEXT: vsra.vv v8, v8, v12
779 %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
783 define <16 x i32> @vsra_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
784 ; CHECK-LABEL: vsra_vx_v16i32:
786 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
787 ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
789 %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
790 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
791 %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
795 define <16 x i32> @vsra_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) {
796 ; CHECK-LABEL: vsra_vx_v16i32_unmasked:
798 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
799 ; CHECK-NEXT: vsra.vx v8, v8, a0
801 %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
802 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
803 %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
807 define <16 x i32> @vsra_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
808 ; CHECK-LABEL: vsra_vi_v16i32:
810 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
811 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
813 %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> splat (i32 5), <16 x i1> %m, i32 %evl)
817 define <16 x i32> @vsra_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
818 ; CHECK-LABEL: vsra_vi_v16i32_unmasked:
820 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
821 ; CHECK-NEXT: vsra.vi v8, v8, 5
823 %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> splat (i32 5), <16 x i1> splat (i1 true), i32 %evl)
827 declare <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32)
829 define <2 x i64> @vsra_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
830 ; CHECK-LABEL: vsra_vv_v2i64:
832 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
833 ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
835 %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
839 define <2 x i64> @vsra_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) {
840 ; CHECK-LABEL: vsra_vv_v2i64_unmasked:
842 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
843 ; CHECK-NEXT: vsra.vv v8, v8, v9
845 %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
849 define <2 x i64> @vsra_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
850 ; RV32-LABEL: vsra_vx_v2i64:
852 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
853 ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
856 ; RV64-LABEL: vsra_vx_v2i64:
858 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
859 ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
861 %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
862 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
863 %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
867 define <2 x i64> @vsra_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl) {
868 ; RV32-LABEL: vsra_vx_v2i64_unmasked:
870 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
871 ; RV32-NEXT: vsra.vx v8, v8, a0
874 ; RV64-LABEL: vsra_vx_v2i64_unmasked:
876 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
877 ; RV64-NEXT: vsra.vx v8, v8, a0
879 %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
880 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
881 %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
885 define <2 x i64> @vsra_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
886 ; CHECK-LABEL: vsra_vi_v2i64:
888 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
889 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
891 %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> splat (i64 5), <2 x i1> %m, i32 %evl)
895 define <2 x i64> @vsra_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
896 ; CHECK-LABEL: vsra_vi_v2i64_unmasked:
898 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
899 ; CHECK-NEXT: vsra.vi v8, v8, 5
901 %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> splat (i64 5), <2 x i1> splat (i1 true), i32 %evl)
905 declare <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32)
; Masked vector-vector vp.ashr on <4 x i64>: selects vsra.vv at LMUL m2.
907 define <4 x i64> @vsra_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
908 ; CHECK-LABEL: vsra_vv_v4i64:
910 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
911 ; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
913 %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
; Unmasked vector-vector vp.ashr on <4 x i64>: vsra.vv without v0.t.
917 define <4 x i64> @vsra_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) {
918 ; CHECK-LABEL: vsra_vv_v4i64_unmasked:
920 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
921 ; CHECK-NEXT: vsra.vv v8, v8, v10
923 %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
; Masked splat-scalar vp.ashr on <4 x i64>: folds to vsra.vx at LMUL m2.
; EVL register differs between RV32 (a2) and RV64 (a1), as in the v2i64 case.
927 define <4 x i64> @vsra_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
928 ; RV32-LABEL: vsra_vx_v4i64:
930 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
931 ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
934 ; RV64-LABEL: vsra_vx_v4i64:
936 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
937 ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
939 %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
940 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
941 %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
; Unmasked splat-scalar vp.ashr on <4 x i64>: vsra.vx without v0.t.
945 define <4 x i64> @vsra_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl) {
946 ; RV32-LABEL: vsra_vx_v4i64_unmasked:
948 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
949 ; RV32-NEXT: vsra.vx v8, v8, a0
952 ; RV64-LABEL: vsra_vx_v4i64_unmasked:
954 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
955 ; RV64-NEXT: vsra.vx v8, v8, a0
957 %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
958 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
959 %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
; Masked constant-splat (5) vp.ashr on <4 x i64>: immediate form vsra.vi.
963 define <4 x i64> @vsra_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
964 ; CHECK-LABEL: vsra_vi_v4i64:
966 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
967 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
969 %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> splat (i64 5), <4 x i1> %m, i32 %evl)
; Unmasked constant-splat vp.ashr on <4 x i64>: vsra.vi without v0.t.
973 define <4 x i64> @vsra_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
974 ; CHECK-LABEL: vsra_vi_v4i64_unmasked:
976 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
977 ; CHECK-NEXT: vsra.vi v8, v8, 5
979 %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> splat (i64 5), <4 x i1> splat (i1 true), i32 %evl)
983 declare <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32)
; Masked vector-vector vp.ashr on <8 x i64>: selects vsra.vv at LMUL m4.
985 define <8 x i64> @vsra_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
986 ; CHECK-LABEL: vsra_vv_v8i64:
988 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
989 ; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t
991 %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
; Unmasked vector-vector vp.ashr on <8 x i64>: vsra.vv without v0.t.
995 define <8 x i64> @vsra_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) {
996 ; CHECK-LABEL: vsra_vv_v8i64_unmasked:
998 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
999 ; CHECK-NEXT: vsra.vv v8, v8, v12
1001 %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
; Masked splat-scalar vp.ashr on <8 x i64>: folds to vsra.vx at LMUL m4.
; EVL register differs between RV32 (a2) and RV64 (a1).
1005 define <8 x i64> @vsra_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
1006 ; RV32-LABEL: vsra_vx_v8i64:
1008 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1009 ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
1012 ; RV64-LABEL: vsra_vx_v8i64:
1014 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1015 ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
1017 %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
1018 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
1019 %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
; Unmasked splat-scalar vp.ashr on <8 x i64>: vsra.vx without v0.t.
1023 define <8 x i64> @vsra_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl) {
1024 ; RV32-LABEL: vsra_vx_v8i64_unmasked:
1026 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1027 ; RV32-NEXT: vsra.vx v8, v8, a0
1030 ; RV64-LABEL: vsra_vx_v8i64_unmasked:
1032 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1033 ; RV64-NEXT: vsra.vx v8, v8, a0
1035 %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
1036 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
1037 %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
; Masked constant-splat (5) vp.ashr on <8 x i64>: immediate form vsra.vi.
1041 define <8 x i64> @vsra_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
1042 ; CHECK-LABEL: vsra_vi_v8i64:
1044 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1045 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
1047 %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> splat (i64 5), <8 x i1> %m, i32 %evl)
; Unmasked constant-splat vp.ashr on <8 x i64>: vsra.vi without v0.t.
1051 define <8 x i64> @vsra_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
1052 ; CHECK-LABEL: vsra_vi_v8i64_unmasked:
1054 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1055 ; CHECK-NEXT: vsra.vi v8, v8, 5
1057 %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> splat (i64 5), <8 x i1> splat (i1 true), i32 %evl)
1061 declare <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32)
; Masked vector-vector vp.ashr on <16 x i64>: selects vsra.vv at LMUL m8.
1063 define <16 x i64> @vsra_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
1064 ; CHECK-LABEL: vsra_vv_v16i64:
1066 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1067 ; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t
1069 %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
; Unmasked vector-vector vp.ashr on <16 x i64>: vsra.vv without v0.t.
1073 define <16 x i64> @vsra_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) {
1074 ; CHECK-LABEL: vsra_vv_v16i64_unmasked:
1076 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1077 ; CHECK-NEXT: vsra.vv v8, v8, v16
1079 %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
; Masked splat-scalar vp.ashr on <16 x i64>: folds to vsra.vx at LMUL m8.
; EVL register differs between RV32 (a2) and RV64 (a1).
1083 define <16 x i64> @vsra_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
1084 ; RV32-LABEL: vsra_vx_v16i64:
1086 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
1087 ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
1090 ; RV64-LABEL: vsra_vx_v16i64:
1092 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
1093 ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
1095 %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
1096 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
1097 %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
; Unmasked splat-scalar vp.ashr on <16 x i64>: vsra.vx without v0.t.
1101 define <16 x i64> @vsra_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %evl) {
1102 ; RV32-LABEL: vsra_vx_v16i64_unmasked:
1104 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
1105 ; RV32-NEXT: vsra.vx v8, v8, a0
1108 ; RV64-LABEL: vsra_vx_v16i64_unmasked:
1110 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
1111 ; RV64-NEXT: vsra.vx v8, v8, a0
1113 %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
1114 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
1115 %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
; Masked constant-splat (5) vp.ashr on <16 x i64>: immediate form vsra.vi.
1119 define <16 x i64> @vsra_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
1120 ; CHECK-LABEL: vsra_vi_v16i64:
1122 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1123 ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
1125 %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> splat (i64 5), <16 x i1> %m, i32 %evl)
1129 define <16 x i64> @vsra_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
1130 ; CHECK-LABEL: vsra_vi_v16i64_unmasked:
1132 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1133 ; CHECK-NEXT: vsra.vi v8, v8, 5
1135 %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> splat (i64 5), <16 x i1> splat (i1 true), i32 %evl)