1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
3 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
5 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
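; Tests for llvm.vp.shl on scalable vector types: vector (vsll.vv), scalar
; (vsll.vx), and immediate (vsll.vi) shift amounts, in masked and unmasked
; forms, for i8/i16/i32/i64 elements across LMULs from mf8 to m8, plus
; illegal types such as nxv8i7 and nxv5i8.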
7 declare <vscale x 8 x i7> @llvm.vp.shl.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
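; i7 is not a legal element type; the splatted shift amount is masked to
; 7 bits (vand.vx with 127) before the vsll.vv.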
9 define <vscale x 8 x i7> @vsll_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
10 ; CHECK-LABEL: vsll_vx_nxv8i7:
12 ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
13 ; CHECK-NEXT: vmv.v.x v9, a0
14 ; CHECK-NEXT: li a0, 127
15 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
16 ; CHECK-NEXT: vand.vx v9, v9, a0, v0.t
17 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
19 %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
20 %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
21 %v = call <vscale x 8 x i7> @llvm.vp.shl.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
22 ret <vscale x 8 x i7> %v
25 declare <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
27 define <vscale x 1 x i8> @vsll_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
28 ; CHECK-LABEL: vsll_vv_nxv1i8:
30 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
31 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
33 %v = call <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
34 ret <vscale x 1 x i8> %v
37 define <vscale x 1 x i8> @vsll_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
38 ; CHECK-LABEL: vsll_vv_nxv1i8_unmasked:
40 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
41 ; CHECK-NEXT: vsll.vv v8, v8, v9
43 %v = call <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
44 ret <vscale x 1 x i8> %v
47 define <vscale x 1 x i8> @vsll_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
48 ; CHECK-LABEL: vsll_vx_nxv1i8:
50 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
51 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
53 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
54 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
55 %v = call <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
56 ret <vscale x 1 x i8> %v
59 define <vscale x 1 x i8> @vsll_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
60 ; CHECK-LABEL: vsll_vx_nxv1i8_unmasked:
62 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
63 ; CHECK-NEXT: vsll.vx v8, v8, a0
65 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
66 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
67 %v = call <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
68 ret <vscale x 1 x i8> %v
71 define <vscale x 1 x i8> @vsll_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
72 ; CHECK-LABEL: vsll_vi_nxv1i8:
74 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
75 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
77 %v = call <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 3), <vscale x 1 x i1> %m, i32 %evl)
78 ret <vscale x 1 x i8> %v
81 define <vscale x 1 x i8> @vsll_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
82 ; CHECK-LABEL: vsll_vi_nxv1i8_unmasked:
84 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
85 ; CHECK-NEXT: vsll.vi v8, v8, 3
87 %v = call <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 3), <vscale x 1 x i1> splat (i1 true), i32 %evl)
88 ret <vscale x 1 x i8> %v
91 declare <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
93 define <vscale x 2 x i8> @vsll_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
94 ; CHECK-LABEL: vsll_vv_nxv2i8:
96 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
97 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
99 %v = call <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
100 ret <vscale x 2 x i8> %v
103 define <vscale x 2 x i8> @vsll_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
104 ; CHECK-LABEL: vsll_vv_nxv2i8_unmasked:
106 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
107 ; CHECK-NEXT: vsll.vv v8, v8, v9
109 %v = call <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
110 ret <vscale x 2 x i8> %v
113 define <vscale x 2 x i8> @vsll_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
114 ; CHECK-LABEL: vsll_vx_nxv2i8:
116 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
117 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
119 %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
120 %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
121 %v = call <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
122 ret <vscale x 2 x i8> %v
125 define <vscale x 2 x i8> @vsll_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
126 ; CHECK-LABEL: vsll_vx_nxv2i8_unmasked:
128 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
129 ; CHECK-NEXT: vsll.vx v8, v8, a0
131 %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
132 %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
133 %v = call <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
134 ret <vscale x 2 x i8> %v
137 define <vscale x 2 x i8> @vsll_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
138 ; CHECK-LABEL: vsll_vi_nxv2i8:
140 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
141 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
143 %v = call <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 3), <vscale x 2 x i1> %m, i32 %evl)
144 ret <vscale x 2 x i8> %v
147 define <vscale x 2 x i8> @vsll_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
148 ; CHECK-LABEL: vsll_vi_nxv2i8_unmasked:
150 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
151 ; CHECK-NEXT: vsll.vi v8, v8, 3
153 %v = call <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 3), <vscale x 2 x i1> splat (i1 true), i32 %evl)
154 ret <vscale x 2 x i8> %v
157 declare <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
159 define <vscale x 4 x i8> @vsll_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
160 ; CHECK-LABEL: vsll_vv_nxv4i8:
162 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
163 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
165 %v = call <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
166 ret <vscale x 4 x i8> %v
169 define <vscale x 4 x i8> @vsll_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
170 ; CHECK-LABEL: vsll_vv_nxv4i8_unmasked:
172 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
173 ; CHECK-NEXT: vsll.vv v8, v8, v9
175 %v = call <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
176 ret <vscale x 4 x i8> %v
179 define <vscale x 4 x i8> @vsll_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
180 ; CHECK-LABEL: vsll_vx_nxv4i8:
182 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
183 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
185 %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
186 %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
187 %v = call <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
188 ret <vscale x 4 x i8> %v
191 define <vscale x 4 x i8> @vsll_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
192 ; CHECK-LABEL: vsll_vx_nxv4i8_unmasked:
194 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
195 ; CHECK-NEXT: vsll.vx v8, v8, a0
197 %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
198 %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
199 %v = call <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
200 ret <vscale x 4 x i8> %v
203 define <vscale x 4 x i8> @vsll_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
204 ; CHECK-LABEL: vsll_vi_nxv4i8:
206 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
207 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
209 %v = call <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 3), <vscale x 4 x i1> %m, i32 %evl)
210 ret <vscale x 4 x i8> %v
213 define <vscale x 4 x i8> @vsll_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
214 ; CHECK-LABEL: vsll_vi_nxv4i8_unmasked:
216 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
217 ; CHECK-NEXT: vsll.vi v8, v8, 3
219 %v = call <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 3), <vscale x 4 x i1> splat (i1 true), i32 %evl)
220 ret <vscale x 4 x i8> %v
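; nxv5i8 has a non-power-of-two element count; the shift is carried out at
; e8/m1, the same configuration used for nxv8i8.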
223 declare <vscale x 5 x i8> @llvm.vp.shl.nxv5i8(<vscale x 5 x i8>, <vscale x 5 x i8>, <vscale x 5 x i1>, i32)
225 define <vscale x 5 x i8> @vsll_vv_nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 zeroext %evl) {
226 ; CHECK-LABEL: vsll_vv_nxv5i8:
228 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
229 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
231 %v = call <vscale x 5 x i8> @llvm.vp.shl.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 %evl)
232 ret <vscale x 5 x i8> %v
235 declare <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
237 define <vscale x 8 x i8> @vsll_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
238 ; CHECK-LABEL: vsll_vv_nxv8i8:
240 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
241 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
243 %v = call <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
244 ret <vscale x 8 x i8> %v
247 define <vscale x 8 x i8> @vsll_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
248 ; CHECK-LABEL: vsll_vv_nxv8i8_unmasked:
250 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
251 ; CHECK-NEXT: vsll.vv v8, v8, v9
253 %v = call <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
254 ret <vscale x 8 x i8> %v
257 define <vscale x 8 x i8> @vsll_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
258 ; CHECK-LABEL: vsll_vx_nxv8i8:
260 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
261 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
263 %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
264 %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
265 %v = call <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
266 ret <vscale x 8 x i8> %v
269 define <vscale x 8 x i8> @vsll_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
270 ; CHECK-LABEL: vsll_vx_nxv8i8_unmasked:
272 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
273 ; CHECK-NEXT: vsll.vx v8, v8, a0
275 %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
276 %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
277 %v = call <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
278 ret <vscale x 8 x i8> %v
281 define <vscale x 8 x i8> @vsll_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
282 ; CHECK-LABEL: vsll_vi_nxv8i8:
284 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
285 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
287 %v = call <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 3), <vscale x 8 x i1> %m, i32 %evl)
288 ret <vscale x 8 x i8> %v
291 define <vscale x 8 x i8> @vsll_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
292 ; CHECK-LABEL: vsll_vi_nxv8i8_unmasked:
294 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
295 ; CHECK-NEXT: vsll.vi v8, v8, 3
297 %v = call <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 3), <vscale x 8 x i1> splat (i1 true), i32 %evl)
298 ret <vscale x 8 x i8> %v
301 declare <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
303 define <vscale x 16 x i8> @vsll_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
304 ; CHECK-LABEL: vsll_vv_nxv16i8:
306 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
307 ; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
309 %v = call <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
310 ret <vscale x 16 x i8> %v
313 define <vscale x 16 x i8> @vsll_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
314 ; CHECK-LABEL: vsll_vv_nxv16i8_unmasked:
316 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
317 ; CHECK-NEXT: vsll.vv v8, v8, v10
319 %v = call <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
320 ret <vscale x 16 x i8> %v
323 define <vscale x 16 x i8> @vsll_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
324 ; CHECK-LABEL: vsll_vx_nxv16i8:
326 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
327 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
329 %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
330 %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
331 %v = call <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
332 ret <vscale x 16 x i8> %v
335 define <vscale x 16 x i8> @vsll_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
336 ; CHECK-LABEL: vsll_vx_nxv16i8_unmasked:
338 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
339 ; CHECK-NEXT: vsll.vx v8, v8, a0
341 %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
342 %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
343 %v = call <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
344 ret <vscale x 16 x i8> %v
347 define <vscale x 16 x i8> @vsll_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
348 ; CHECK-LABEL: vsll_vi_nxv16i8:
350 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
351 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
353 %v = call <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 3), <vscale x 16 x i1> %m, i32 %evl)
354 ret <vscale x 16 x i8> %v
357 define <vscale x 16 x i8> @vsll_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
358 ; CHECK-LABEL: vsll_vi_nxv16i8_unmasked:
360 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
361 ; CHECK-NEXT: vsll.vi v8, v8, 3
363 %v = call <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 3), <vscale x 16 x i1> splat (i1 true), i32 %evl)
364 ret <vscale x 16 x i8> %v
367 declare <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
369 define <vscale x 32 x i8> @vsll_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
370 ; CHECK-LABEL: vsll_vv_nxv32i8:
372 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
373 ; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
375 %v = call <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
376 ret <vscale x 32 x i8> %v
379 define <vscale x 32 x i8> @vsll_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
380 ; CHECK-LABEL: vsll_vv_nxv32i8_unmasked:
382 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
383 ; CHECK-NEXT: vsll.vv v8, v8, v12
385 %v = call <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
386 ret <vscale x 32 x i8> %v
389 define <vscale x 32 x i8> @vsll_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
390 ; CHECK-LABEL: vsll_vx_nxv32i8:
392 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
393 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
395 %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
396 %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
397 %v = call <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
398 ret <vscale x 32 x i8> %v
401 define <vscale x 32 x i8> @vsll_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
402 ; CHECK-LABEL: vsll_vx_nxv32i8_unmasked:
404 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
405 ; CHECK-NEXT: vsll.vx v8, v8, a0
407 %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
408 %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
409 %v = call <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
410 ret <vscale x 32 x i8> %v
413 define <vscale x 32 x i8> @vsll_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
414 ; CHECK-LABEL: vsll_vi_nxv32i8:
416 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
417 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
419 %v = call <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 3), <vscale x 32 x i1> %m, i32 %evl)
420 ret <vscale x 32 x i8> %v
423 define <vscale x 32 x i8> @vsll_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
424 ; CHECK-LABEL: vsll_vi_nxv32i8_unmasked:
426 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
427 ; CHECK-NEXT: vsll.vi v8, v8, 3
429 %v = call <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 3), <vscale x 32 x i1> splat (i1 true), i32 %evl)
430 ret <vscale x 32 x i8> %v
433 declare <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
435 define <vscale x 64 x i8> @vsll_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
436 ; CHECK-LABEL: vsll_vv_nxv64i8:
438 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
439 ; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
441 %v = call <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
442 ret <vscale x 64 x i8> %v
445 define <vscale x 64 x i8> @vsll_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
446 ; CHECK-LABEL: vsll_vv_nxv64i8_unmasked:
448 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
449 ; CHECK-NEXT: vsll.vv v8, v8, v16
451 %v = call <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
452 ret <vscale x 64 x i8> %v
455 define <vscale x 64 x i8> @vsll_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
456 ; CHECK-LABEL: vsll_vx_nxv64i8:
458 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
459 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
461 %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
462 %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
463 %v = call <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
464 ret <vscale x 64 x i8> %v
467 define <vscale x 64 x i8> @vsll_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
468 ; CHECK-LABEL: vsll_vx_nxv64i8_unmasked:
470 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
471 ; CHECK-NEXT: vsll.vx v8, v8, a0
473 %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
474 %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
475 %v = call <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
476 ret <vscale x 64 x i8> %v
479 define <vscale x 64 x i8> @vsll_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
480 ; CHECK-LABEL: vsll_vi_nxv64i8:
482 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
483 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
485 %v = call <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 3), <vscale x 64 x i1> %m, i32 %evl)
486 ret <vscale x 64 x i8> %v
489 define <vscale x 64 x i8> @vsll_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
490 ; CHECK-LABEL: vsll_vi_nxv64i8_unmasked:
492 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
493 ; CHECK-NEXT: vsll.vi v8, v8, 3
495 %v = call <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 3), <vscale x 64 x i1> splat (i1 true), i32 %evl)
496 ret <vscale x 64 x i8> %v
499 declare <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
501 define <vscale x 1 x i16> @vsll_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
502 ; CHECK-LABEL: vsll_vv_nxv1i16:
504 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
505 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
507 %v = call <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
508 ret <vscale x 1 x i16> %v
511 define <vscale x 1 x i16> @vsll_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
512 ; CHECK-LABEL: vsll_vv_nxv1i16_unmasked:
514 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
515 ; CHECK-NEXT: vsll.vv v8, v8, v9
517 %v = call <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
518 ret <vscale x 1 x i16> %v
521 define <vscale x 1 x i16> @vsll_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
522 ; CHECK-LABEL: vsll_vx_nxv1i16:
524 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
525 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
527 %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
528 %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
529 %v = call <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
530 ret <vscale x 1 x i16> %v
533 define <vscale x 1 x i16> @vsll_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
534 ; CHECK-LABEL: vsll_vx_nxv1i16_unmasked:
536 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
537 ; CHECK-NEXT: vsll.vx v8, v8, a0
539 %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
540 %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
541 %v = call <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
542 ret <vscale x 1 x i16> %v
545 define <vscale x 1 x i16> @vsll_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
546 ; CHECK-LABEL: vsll_vi_nxv1i16:
548 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
549 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
551 %v = call <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 3), <vscale x 1 x i1> %m, i32 %evl)
552 ret <vscale x 1 x i16> %v
555 define <vscale x 1 x i16> @vsll_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
556 ; CHECK-LABEL: vsll_vi_nxv1i16_unmasked:
558 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
559 ; CHECK-NEXT: vsll.vi v8, v8, 3
561 %v = call <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 3), <vscale x 1 x i1> splat (i1 true), i32 %evl)
562 ret <vscale x 1 x i16> %v
565 declare <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
567 define <vscale x 2 x i16> @vsll_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
568 ; CHECK-LABEL: vsll_vv_nxv2i16:
570 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
571 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
573 %v = call <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
574 ret <vscale x 2 x i16> %v
577 define <vscale x 2 x i16> @vsll_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
578 ; CHECK-LABEL: vsll_vv_nxv2i16_unmasked:
580 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
581 ; CHECK-NEXT: vsll.vv v8, v8, v9
583 %v = call <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
584 ret <vscale x 2 x i16> %v
587 define <vscale x 2 x i16> @vsll_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
588 ; CHECK-LABEL: vsll_vx_nxv2i16:
590 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
591 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
593 %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
594 %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
595 %v = call <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
596 ret <vscale x 2 x i16> %v
599 define <vscale x 2 x i16> @vsll_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
600 ; CHECK-LABEL: vsll_vx_nxv2i16_unmasked:
602 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
603 ; CHECK-NEXT: vsll.vx v8, v8, a0
605 %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
606 %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
607 %v = call <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
608 ret <vscale x 2 x i16> %v
611 define <vscale x 2 x i16> @vsll_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
612 ; CHECK-LABEL: vsll_vi_nxv2i16:
614 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
615 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
617 %v = call <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 3), <vscale x 2 x i1> %m, i32 %evl)
618 ret <vscale x 2 x i16> %v
621 define <vscale x 2 x i16> @vsll_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
622 ; CHECK-LABEL: vsll_vi_nxv2i16_unmasked:
624 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
625 ; CHECK-NEXT: vsll.vi v8, v8, 3
627 %v = call <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 3), <vscale x 2 x i1> splat (i1 true), i32 %evl)
628 ret <vscale x 2 x i16> %v
631 declare <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
633 define <vscale x 4 x i16> @vsll_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
634 ; CHECK-LABEL: vsll_vv_nxv4i16:
636 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
637 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
639 %v = call <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
640 ret <vscale x 4 x i16> %v
643 define <vscale x 4 x i16> @vsll_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
644 ; CHECK-LABEL: vsll_vv_nxv4i16_unmasked:
646 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
647 ; CHECK-NEXT: vsll.vv v8, v8, v9
649 %v = call <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
650 ret <vscale x 4 x i16> %v
653 define <vscale x 4 x i16> @vsll_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
654 ; CHECK-LABEL: vsll_vx_nxv4i16:
656 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
657 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
659 %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
660 %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
661 %v = call <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
662 ret <vscale x 4 x i16> %v
665 define <vscale x 4 x i16> @vsll_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
666 ; CHECK-LABEL: vsll_vx_nxv4i16_unmasked:
668 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
669 ; CHECK-NEXT: vsll.vx v8, v8, a0
671 %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
672 %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
673 %v = call <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
674 ret <vscale x 4 x i16> %v
677 define <vscale x 4 x i16> @vsll_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
678 ; CHECK-LABEL: vsll_vi_nxv4i16:
680 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
681 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
683 %v = call <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 3), <vscale x 4 x i1> %m, i32 %evl)
684 ret <vscale x 4 x i16> %v
687 define <vscale x 4 x i16> @vsll_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
688 ; CHECK-LABEL: vsll_vi_nxv4i16_unmasked:
690 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
691 ; CHECK-NEXT: vsll.vi v8, v8, 3
693 %v = call <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 3), <vscale x 4 x i1> splat (i1 true), i32 %evl)
694 ret <vscale x 4 x i16> %v
697 declare <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
699 define <vscale x 8 x i16> @vsll_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
700 ; CHECK-LABEL: vsll_vv_nxv8i16:
702 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
703 ; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
705 %v = call <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
706 ret <vscale x 8 x i16> %v
709 define <vscale x 8 x i16> @vsll_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
710 ; CHECK-LABEL: vsll_vv_nxv8i16_unmasked:
712 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
713 ; CHECK-NEXT: vsll.vv v8, v8, v10
715 %v = call <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
716 ret <vscale x 8 x i16> %v
719 define <vscale x 8 x i16> @vsll_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
720 ; CHECK-LABEL: vsll_vx_nxv8i16:
722 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
723 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
725 %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
726 %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
727 %v = call <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
728 ret <vscale x 8 x i16> %v
731 define <vscale x 8 x i16> @vsll_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
732 ; CHECK-LABEL: vsll_vx_nxv8i16_unmasked:
734 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
735 ; CHECK-NEXT: vsll.vx v8, v8, a0
737 %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
738 %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
739 %v = call <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
740 ret <vscale x 8 x i16> %v
743 define <vscale x 8 x i16> @vsll_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
744 ; CHECK-LABEL: vsll_vi_nxv8i16:
746 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
747 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
749 %v = call <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 3), <vscale x 8 x i1> %m, i32 %evl)
750 ret <vscale x 8 x i16> %v
753 define <vscale x 8 x i16> @vsll_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
754 ; CHECK-LABEL: vsll_vi_nxv8i16_unmasked:
756 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
757 ; CHECK-NEXT: vsll.vi v8, v8, 3
759 %v = call <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 3), <vscale x 8 x i1> splat (i1 true), i32 %evl)
760 ret <vscale x 8 x i16> %v
763 declare <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
765 define <vscale x 16 x i16> @vsll_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
766 ; CHECK-LABEL: vsll_vv_nxv16i16:
768 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
769 ; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
771 %v = call <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
772 ret <vscale x 16 x i16> %v
775 define <vscale x 16 x i16> @vsll_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
776 ; CHECK-LABEL: vsll_vv_nxv16i16_unmasked:
778 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
779 ; CHECK-NEXT: vsll.vv v8, v8, v12
781 %v = call <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
782 ret <vscale x 16 x i16> %v
785 define <vscale x 16 x i16> @vsll_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
786 ; CHECK-LABEL: vsll_vx_nxv16i16:
788 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
789 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
791 %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
792 %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
793 %v = call <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
794 ret <vscale x 16 x i16> %v
797 define <vscale x 16 x i16> @vsll_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
798 ; CHECK-LABEL: vsll_vx_nxv16i16_unmasked:
800 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
801 ; CHECK-NEXT: vsll.vx v8, v8, a0
803 %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
804 %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
805 %v = call <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
806 ret <vscale x 16 x i16> %v
809 define <vscale x 16 x i16> @vsll_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
810 ; CHECK-LABEL: vsll_vi_nxv16i16:
812 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
813 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
815 %v = call <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 3), <vscale x 16 x i1> %m, i32 %evl)
816 ret <vscale x 16 x i16> %v
819 define <vscale x 16 x i16> @vsll_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
820 ; CHECK-LABEL: vsll_vi_nxv16i16_unmasked:
822 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
823 ; CHECK-NEXT: vsll.vi v8, v8, 3
825 %v = call <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 3), <vscale x 16 x i1> splat (i1 true), i32 %evl)
826 ret <vscale x 16 x i16> %v
829 declare <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
831 define <vscale x 32 x i16> @vsll_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
832 ; CHECK-LABEL: vsll_vv_nxv32i16:
834 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
835 ; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
837 %v = call <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
838 ret <vscale x 32 x i16> %v
841 define <vscale x 32 x i16> @vsll_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
842 ; CHECK-LABEL: vsll_vv_nxv32i16_unmasked:
844 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
845 ; CHECK-NEXT: vsll.vv v8, v8, v16
847 %v = call <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
848 ret <vscale x 32 x i16> %v
851 define <vscale x 32 x i16> @vsll_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
852 ; CHECK-LABEL: vsll_vx_nxv32i16:
854 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
855 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
857 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
858 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
859 %v = call <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
860 ret <vscale x 32 x i16> %v
863 define <vscale x 32 x i16> @vsll_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
864 ; CHECK-LABEL: vsll_vx_nxv32i16_unmasked:
866 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
867 ; CHECK-NEXT: vsll.vx v8, v8, a0
869 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
870 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
871 %v = call <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
872 ret <vscale x 32 x i16> %v
875 define <vscale x 32 x i16> @vsll_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
876 ; CHECK-LABEL: vsll_vi_nxv32i16:
878 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
879 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
881 %v = call <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 3), <vscale x 32 x i1> %m, i32 %evl)
882 ret <vscale x 32 x i16> %v
885 define <vscale x 32 x i16> @vsll_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
886 ; CHECK-LABEL: vsll_vi_nxv32i16_unmasked:
888 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
889 ; CHECK-NEXT: vsll.vi v8, v8, 3
891 %v = call <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 3), <vscale x 32 x i1> splat (i1 true), i32 %evl)
892 ret <vscale x 32 x i16> %v
895 declare <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
897 define <vscale x 1 x i32> @vsll_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
898 ; CHECK-LABEL: vsll_vv_nxv1i32:
900 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
901 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
903 %v = call <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
904 ret <vscale x 1 x i32> %v
907 define <vscale x 1 x i32> @vsll_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
908 ; CHECK-LABEL: vsll_vv_nxv1i32_unmasked:
910 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
911 ; CHECK-NEXT: vsll.vv v8, v8, v9
913 %v = call <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
914 ret <vscale x 1 x i32> %v
917 define <vscale x 1 x i32> @vsll_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
918 ; CHECK-LABEL: vsll_vx_nxv1i32:
920 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
921 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
923 %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
924 %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
925 %v = call <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
926 ret <vscale x 1 x i32> %v
929 define <vscale x 1 x i32> @vsll_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
930 ; CHECK-LABEL: vsll_vx_nxv1i32_unmasked:
932 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
933 ; CHECK-NEXT: vsll.vx v8, v8, a0
935 %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
936 %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
937 %v = call <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
938 ret <vscale x 1 x i32> %v
941 define <vscale x 1 x i32> @vsll_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
942 ; CHECK-LABEL: vsll_vi_nxv1i32:
944 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
945 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
947 %v = call <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 3), <vscale x 1 x i1> %m, i32 %evl)
948 ret <vscale x 1 x i32> %v
951 define <vscale x 1 x i32> @vsll_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
952 ; CHECK-LABEL: vsll_vi_nxv1i32_unmasked:
954 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
955 ; CHECK-NEXT: vsll.vi v8, v8, 3
957 %v = call <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 3), <vscale x 1 x i1> splat (i1 true), i32 %evl)
958 ret <vscale x 1 x i32> %v
961 declare <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
963 define <vscale x 2 x i32> @vsll_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
964 ; CHECK-LABEL: vsll_vv_nxv2i32:
966 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
967 ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
969 %v = call <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
970 ret <vscale x 2 x i32> %v
973 define <vscale x 2 x i32> @vsll_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
974 ; CHECK-LABEL: vsll_vv_nxv2i32_unmasked:
976 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
977 ; CHECK-NEXT: vsll.vv v8, v8, v9
979 %v = call <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
980 ret <vscale x 2 x i32> %v
983 define <vscale x 2 x i32> @vsll_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
984 ; CHECK-LABEL: vsll_vx_nxv2i32:
986 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
987 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
989 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
990 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
991 %v = call <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
992 ret <vscale x 2 x i32> %v
995 define <vscale x 2 x i32> @vsll_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
996 ; CHECK-LABEL: vsll_vx_nxv2i32_unmasked:
998 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
999 ; CHECK-NEXT: vsll.vx v8, v8, a0
1001 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
1002 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
1003 %v = call <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1004 ret <vscale x 2 x i32> %v
1007 define <vscale x 2 x i32> @vsll_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1008 ; CHECK-LABEL: vsll_vi_nxv2i32:
1010 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1011 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
1013 %v = call <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 3), <vscale x 2 x i1> %m, i32 %evl)
1014 ret <vscale x 2 x i32> %v
1017 define <vscale x 2 x i32> @vsll_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
1018 ; CHECK-LABEL: vsll_vi_nxv2i32_unmasked:
1020 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1021 ; CHECK-NEXT: vsll.vi v8, v8, 3
1023 %v = call <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 3), <vscale x 2 x i1> splat (i1 true), i32 %evl)
1024 ret <vscale x 2 x i32> %v
1027 declare <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
1029 define <vscale x 4 x i32> @vsll_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1030 ; CHECK-LABEL: vsll_vv_nxv4i32:
1032 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1033 ; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
1035 %v = call <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
1036 ret <vscale x 4 x i32> %v
1039 define <vscale x 4 x i32> @vsll_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
1040 ; CHECK-LABEL: vsll_vv_nxv4i32_unmasked:
1042 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1043 ; CHECK-NEXT: vsll.vv v8, v8, v10
1045 %v = call <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1046 ret <vscale x 4 x i32> %v
1049 define <vscale x 4 x i32> @vsll_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1050 ; CHECK-LABEL: vsll_vx_nxv4i32:
1052 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1053 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
1055 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
1056 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
1057 %v = call <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
1058 ret <vscale x 4 x i32> %v
1061 define <vscale x 4 x i32> @vsll_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
1062 ; CHECK-LABEL: vsll_vx_nxv4i32_unmasked:
1064 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1065 ; CHECK-NEXT: vsll.vx v8, v8, a0
1067 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
1068 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
1069 %v = call <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1070 ret <vscale x 4 x i32> %v
1073 define <vscale x 4 x i32> @vsll_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1074 ; CHECK-LABEL: vsll_vi_nxv4i32:
1076 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1077 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
1079 %v = call <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 3), <vscale x 4 x i1> %m, i32 %evl)
1080 ret <vscale x 4 x i32> %v
1083 define <vscale x 4 x i32> @vsll_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
1084 ; CHECK-LABEL: vsll_vi_nxv4i32_unmasked:
1086 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1087 ; CHECK-NEXT: vsll.vi v8, v8, 3
1089 %v = call <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 3), <vscale x 4 x i1> splat (i1 true), i32 %evl)
1090 ret <vscale x 4 x i32> %v
1093 declare <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
1095 define <vscale x 8 x i32> @vsll_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1096 ; CHECK-LABEL: vsll_vv_nxv8i32:
1098 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1099 ; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
1101 %v = call <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
1102 ret <vscale x 8 x i32> %v
1105 define <vscale x 8 x i32> @vsll_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
1106 ; CHECK-LABEL: vsll_vv_nxv8i32_unmasked:
1108 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1109 ; CHECK-NEXT: vsll.vv v8, v8, v12
1111 %v = call <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1112 ret <vscale x 8 x i32> %v
1115 define <vscale x 8 x i32> @vsll_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1116 ; CHECK-LABEL: vsll_vx_nxv8i32:
1118 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1119 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
1121 %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1122 %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1123 %v = call <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
1124 ret <vscale x 8 x i32> %v
1127 define <vscale x 8 x i32> @vsll_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
1128 ; CHECK-LABEL: vsll_vx_nxv8i32_unmasked:
1130 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1131 ; CHECK-NEXT: vsll.vx v8, v8, a0
1133 %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1134 %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1135 %v = call <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1136 ret <vscale x 8 x i32> %v
1139 define <vscale x 8 x i32> @vsll_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1140 ; CHECK-LABEL: vsll_vi_nxv8i32:
1142 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1143 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
1145 %v = call <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 3), <vscale x 8 x i1> %m, i32 %evl)
1146 ret <vscale x 8 x i32> %v
1149 define <vscale x 8 x i32> @vsll_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
1150 ; CHECK-LABEL: vsll_vi_nxv8i32_unmasked:
1152 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1153 ; CHECK-NEXT: vsll.vi v8, v8, 3
1155 %v = call <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 3), <vscale x 8 x i1> splat (i1 true), i32 %evl)
1156 ret <vscale x 8 x i32> %v
1159 declare <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
1161 define <vscale x 16 x i32> @vsll_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
1162 ; CHECK-LABEL: vsll_vv_nxv16i32:
1164 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
1165 ; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
1167 %v = call <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
1168 ret <vscale x 16 x i32> %v
1171 define <vscale x 16 x i32> @vsll_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
1172 ; CHECK-LABEL: vsll_vv_nxv16i32_unmasked:
1174 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
1175 ; CHECK-NEXT: vsll.vv v8, v8, v16
1177 %v = call <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
1178 ret <vscale x 16 x i32> %v
1181 define <vscale x 16 x i32> @vsll_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
1182 ; CHECK-LABEL: vsll_vx_nxv16i32:
1184 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1185 ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
1187 %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
1188 %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
1189 %v = call <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
1190 ret <vscale x 16 x i32> %v
1193 define <vscale x 16 x i32> @vsll_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
1194 ; CHECK-LABEL: vsll_vx_nxv16i32_unmasked:
1196 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1197 ; CHECK-NEXT: vsll.vx v8, v8, a0
1199 %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
1200 %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
1201 %v = call <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
1202 ret <vscale x 16 x i32> %v
1205 define <vscale x 16 x i32> @vsll_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
1206 ; CHECK-LABEL: vsll_vi_nxv16i32:
1208 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
1209 ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
1211 %v = call <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 3), <vscale x 16 x i1> %m, i32 %evl)
1212 ret <vscale x 16 x i32> %v
1215 define <vscale x 16 x i32> @vsll_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
1216 ; CHECK-LABEL: vsll_vi_nxv16i32_unmasked:
1218 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
1219 ; CHECK-NEXT: vsll.vi v8, v8, 3
1221 %v = call <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 3), <vscale x 16 x i1> splat (i1 true), i32 %evl)
1222 ret <vscale x 16 x i32> %v
declare <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i64> @vsll_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vsll_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv1i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsll.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vsll_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vsll.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vsll_vx_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vsll.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vsll_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv1i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vsll.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsll_vx_nxv1i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vsll.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vsll_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 3), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vsll_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv1i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 3
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 3), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

declare <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vsll_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vsll_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsll.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vsll_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vsll.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vsll_vx_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vsll.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vsll_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv2i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vsll.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsll_vx_nxv2i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vsll.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vsll_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 3), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vsll_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 3
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 3), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

declare <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i64> @vsll_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsll.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vsll_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsll.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vsll_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vsll.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vsll_vx_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vsll.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vsll_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv4i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vsll.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsll_vx_nxv4i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vsll.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vsll_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 3), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vsll_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 3
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 3), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

declare <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i64> @vsll_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vsll_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsll.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vsll_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vsll.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vsll_vx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vsll.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vsll_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv8i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vsll.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsll_vx_nxv8i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vsll.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vsll_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 3), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vsll_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 3
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 3), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}