; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64NOM

; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64M
; vmul tests for scalable i8 vectors (LMUL mf8 through m8): vector-vector,
; vector-scalar, and vector-immediate (splat -7, which needs a scalar register).
define <vscale x 1 x i8> @vmul_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i8> %va, %vb
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vmul_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i8> %va, %splat
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vmul_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i8> %va, splat (i8 -7)
  ret <vscale x 1 x i8> %vc
}

define <vscale x 2 x i8> @vmul_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i8> %va, %vb
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vmul_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i8> %va, %splat
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vmul_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i8> %va, splat (i8 -7)
  ret <vscale x 2 x i8> %vc
}

define <vscale x 4 x i8> @vmul_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i8> %va, %vb
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vmul_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i8> %va, %splat
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vmul_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i8> %va, splat (i8 -7)
  ret <vscale x 4 x i8> %vc
}

define <vscale x 8 x i8> @vmul_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vmul_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vmul_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i8> %va, splat (i8 -7)
  ret <vscale x 8 x i8> %vc
}

define <vscale x 16 x i8> @vmul_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = mul <vscale x 16 x i8> %va, %vb
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vmul_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %vc = mul <vscale x 16 x i8> %va, %splat
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vmul_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv16i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 16 x i8> %va, splat (i8 -7)
  ret <vscale x 16 x i8> %vc
}

define <vscale x 32 x i8> @vmul_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = mul <vscale x 32 x i8> %va, %vb
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vmul_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %vc = mul <vscale x 32 x i8> %va, %splat
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vmul_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv32i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 32 x i8> %va, splat (i8 -7)
  ret <vscale x 32 x i8> %vc
}

define <vscale x 64 x i8> @vmul_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = mul <vscale x 64 x i8> %va, %vb
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vmul_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %vc = mul <vscale x 64 x i8> %va, %splat
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vmul_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv64i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 64 x i8> %va, splat (i8 -7)
  ret <vscale x 64 x i8> %vc
}
; vmul tests for scalable i16 vectors (LMUL mf4 through m8).
define <vscale x 1 x i16> @vmul_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i16> %va, %vb
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vmul_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i16> %va, %splat
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vmul_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv1i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i16> %va, splat (i16 -7)
  ret <vscale x 1 x i16> %vc
}

define <vscale x 2 x i16> @vmul_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i16> %va, %vb
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vmul_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i16> %va, %splat
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vmul_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i16> %va, splat (i16 -7)
  ret <vscale x 2 x i16> %vc
}

define <vscale x 4 x i16> @vmul_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i16> %va, %vb
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vmul_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i16> %va, %splat
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vmul_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv4i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i16> %va, splat (i16 -7)
  ret <vscale x 4 x i16> %vc
}

define <vscale x 8 x i16> @vmul_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i16> %va, %vb
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vmul_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vmul_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i16> %va, splat (i16 -7)
  ret <vscale x 8 x i16> %vc
}

define <vscale x 16 x i16> @vmul_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = mul <vscale x 16 x i16> %va, %vb
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vmul_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %vc = mul <vscale x 16 x i16> %va, %splat
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vmul_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv16i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 16 x i16> %va, splat (i16 -7)
  ret <vscale x 16 x i16> %vc
}

define <vscale x 32 x i16> @vmul_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = mul <vscale x 32 x i16> %va, %vb
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vmul_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %vc = mul <vscale x 32 x i16> %va, %splat
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vmul_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv32i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 32 x i16> %va, splat (i16 -7)
  ret <vscale x 32 x i16> %vc
}
; vmul tests for scalable i32 vectors (LMUL mf2 through m8).
define <vscale x 1 x i32> @vmul_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i32> %va, %vb
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vmul_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vmul_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i32> %va, %splat
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vmul_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i32> %va, splat (i32 -7)
  ret <vscale x 1 x i32> %vc
}

define <vscale x 2 x i32> @vmul_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i32> %va, %vb
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vmul_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vmul_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i32> %va, %splat
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vmul_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i32> %va, splat (i32 -7)
  ret <vscale x 2 x i32> %vc
}

define <vscale x 4 x i32> @vmul_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i32> %va, %vb
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vmul_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vmul_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i32> %va, %splat
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vmul_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i32> %va, splat (i32 -7)
  ret <vscale x 4 x i32> %vc
}

define <vscale x 8 x i32> @vmul_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i32> %va, %vb
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vmul_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vmul_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i32> %va, %splat
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vmul_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i32> %va, splat (i32 -7)
  ret <vscale x 8 x i32> %vc
}

define <vscale x 16 x i32> @vmul_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = mul <vscale x 16 x i32> %va, %vb
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vmul_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vmul_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %vc = mul <vscale x 16 x i32> %va, %splat
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vmul_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv16i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 16 x i32> %va, splat (i32 -7)
  ret <vscale x 16 x i32> %vc
}
; vmul tests for <vscale x 1 x i64>. On RV32 the i64 scalar is splatted via a
; stack slot and vlse64.v, so vx lowers differently per target; multiplies by
; 2 and 16 are strength-reduced to vadd.vv / vsll.vi respectively.
define <vscale x 1 x i64> @vmul_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i64> %va, %vb
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vmul_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; RV32-LABEL: vmul_vx_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i64> %va, %splat
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vmul_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv1i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i64> %va, splat (i64 -7)
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vmul_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv1i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i64> %va, splat (i64 2)
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vmul_vi_nxv1i64_2(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv1i64_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i64> %va, splat (i64 16)
  ret <vscale x 1 x i64> %vc
}
; vmul tests for <vscale x 2 x i64> (same pattern as nxv1i64, LMUL m2).
define <vscale x 2 x i64> @vmul_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i64> %va, %vb
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vmul_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; RV32-LABEL: vmul_vx_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i64> %va, %splat
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vmul_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv2i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i64> %va, splat (i64 -7)
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vmul_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv2i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i64> %va, splat (i64 2)
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vmul_vi_nxv2i64_2(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv2i64_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i64> %va, splat (i64 16)
  ret <vscale x 2 x i64> %vc
}
; vmul tests for <vscale x 4 x i64> (same pattern, LMUL m4).
define <vscale x 4 x i64> @vmul_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i64> %va, %vb
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vmul_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; RV32-LABEL: vmul_vx_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i64> %va, %splat
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vmul_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv4i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i64> %va, splat (i64 -7)
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vmul_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv4i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i64> %va, splat (i64 2)
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vmul_vi_nxv4i64_2(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv4i64_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i64> %va, splat (i64 16)
  ret <vscale x 4 x i64> %vc
}
; vmul tests for <vscale x 8 x i64> (same pattern, LMUL m8).
define <vscale x 8 x i64> @vmul_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i64> %va, %vb
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vmul_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-LABEL: vmul_vx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i64> %va, %splat
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vmul_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i64> %va, splat (i64 -7)
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vmul_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv8i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i64> %va, splat (i64 2)
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vmul_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv8i64_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i64> %va, splat (i64 16)
  ret <vscale x 8 x i64> %vc
}
; Multiply of two scalar splats: with +m (RV64M) the scalars are multiplied in
; a GPR and the product splatted; without it (RV64NOM) the splat is multiplied
; by vmul.vx; RV32 builds both splats through stack slots with vlse64.v.
define <vscale x 8 x i64> @vmul_xx_nxv8i64(i64 %a, i64 %b) nounwind {
; RV32-LABEL: vmul_xx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v8, (a0), zero
; RV32-NEXT:    sw a3, 4(sp)
; RV32-NEXT:    sw a2, 0(sp)
; RV32-NEXT:    mv a0, sp
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64NOM-LABEL: vmul_xx_nxv8i64:
; RV64NOM:       # %bb.0:
; RV64NOM-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
; RV64NOM-NEXT:    vmv.v.x v8, a0
; RV64NOM-NEXT:    vmul.vx v8, v8, a1
; RV64NOM-NEXT:    ret
;
; RV64M-LABEL: vmul_xx_nxv8i64:
; RV64M:       # %bb.0:
; RV64M-NEXT:    mul a0, a0, a1
; RV64M-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64M-NEXT:    vmv.v.x v8, a0
; RV64M-NEXT:    ret
  %head1 = insertelement <vscale x 8 x i64> poison, i64 %a, i32 0
  %splat1 = shufflevector <vscale x 8 x i64> %head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %head2 = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %splat2 = shufflevector <vscale x 8 x i64> %head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = mul <vscale x 8 x i64> %splat1, %splat2
  ret <vscale x 8 x i64> %v
}
; Masked vmul tests: a select against a splat of 1 (the multiplicative
; identity) folds into a masked vmul with mask-undisturbed (mu) policy.
define <vscale x 8 x i32> @vmul_vv_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vmul_vv_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> splat (i32 1)
  %vc = mul <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vmul_vx_mask_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vmul_vx_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %head2 = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head2, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> splat (i32 1)
  %vc = mul <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vmul_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vmul_vi_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i32> splat (i32 1)
  %vc = mul <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}