1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
3 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
5 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
7 declare <vscale x 8 x i7> @llvm.vp.mul.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
; Non-power-of-two element type: vp.mul only needs the low 7 bits, so no
; sign-extension fixup is required before the e8 multiply.
define <vscale x 8 x i7> @vmul_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i7> @llvm.vp.mul.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i7> %v
}
21 declare <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
; nxv1i8: masked/unmasked vector-vector and vector-scalar vp.mul (e8, mf8).
define <vscale x 1 x i8> @vmul_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmul_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmul_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmul_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}
67 declare <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
; nxv2i8: masked/unmasked vector-vector and vector-scalar vp.mul (e8, mf4).
define <vscale x 2 x i8> @vmul_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vmul_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vmul_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vmul_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}
113 declare <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
; nxv4i8: masked/unmasked vector-vector and vector-scalar vp.mul (e8, mf2).
define <vscale x 4 x i8> @vmul_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vmul_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vmul_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vmul_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}
159 declare <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
; nxv8i8: masked/unmasked vector-vector and vector-scalar vp.mul (e8, m1).
define <vscale x 8 x i8> @vmul_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vmul_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vmul_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vmul_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}
205 declare <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
; nxv16i8: masked/unmasked vector-vector and vector-scalar vp.mul (e8, m2).
define <vscale x 16 x i8> @vmul_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vmul_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vmul_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vmul_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}
251 declare <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
; nxv32i8: masked/unmasked vector-vector and vector-scalar vp.mul (e8, m4).
define <vscale x 32 x i8> @vmul_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vmul_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vmul_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vmul_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}
297 declare <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
; nxv64i8: masked/unmasked vector-vector and vector-scalar vp.mul (e8, m8).
define <vscale x 64 x i8> @vmul_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vmul_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vmul_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vmul_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}
343 declare <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
; nxv1i16: masked/unmasked vector-vector and vector-scalar vp.mul (e16, mf4).
define <vscale x 1 x i16> @vmul_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vmul_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vmul_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vmul_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}
389 declare <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
; nxv2i16: masked/unmasked vector-vector and vector-scalar vp.mul (e16, mf2).
define <vscale x 2 x i16> @vmul_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vmul_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vmul_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vmul_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}
435 declare <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
; nxv4i16: masked/unmasked vector-vector and vector-scalar vp.mul (e16, m1).
define <vscale x 4 x i16> @vmul_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vmul_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vmul_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vmul_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}
481 declare <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
; nxv8i16: masked/unmasked vector-vector and vector-scalar vp.mul (e16, m2).
define <vscale x 8 x i16> @vmul_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vmul_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vmul_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vmul_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}
527 declare <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
; nxv16i16: masked/unmasked vector-vector and vector-scalar vp.mul (e16, m4).
define <vscale x 16 x i16> @vmul_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vmul_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vmul_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vmul_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}
573 declare <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
; nxv32i16: masked/unmasked vector-vector and vector-scalar vp.mul (e16, m8).
define <vscale x 32 x i16> @vmul_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vmul_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vmul_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vmul_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}
619 declare <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
; nxv1i32: masked/unmasked vector-vector and vector-scalar vp.mul (e32, mf2).
define <vscale x 1 x i32> @vmul_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vmul_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vmul_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vmul_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}
665 declare <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
; nxv2i32: masked/unmasked vector-vector and vector-scalar vp.mul (e32, m1).
define <vscale x 2 x i32> @vmul_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vmul_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vmul_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vmul_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}
; --- nxv4i32: same four test shapes at e32, m2 (second source in v10).
711 declare <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
713 define <vscale x 4 x i32> @vmul_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
714 ; CHECK-LABEL: vmul_vv_nxv4i32:
716 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
717 ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t
719 %v = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
720 ret <vscale x 4 x i32> %v
723 define <vscale x 4 x i32> @vmul_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
724 ; CHECK-LABEL: vmul_vv_nxv4i32_unmasked:
726 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
727 ; CHECK-NEXT: vmul.vv v8, v8, v10
729 %v = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
730 ret <vscale x 4 x i32> %v
733 define <vscale x 4 x i32> @vmul_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
734 ; CHECK-LABEL: vmul_vx_nxv4i32:
736 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
737 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
739 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
740 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
741 %v = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
742 ret <vscale x 4 x i32> %v
745 define <vscale x 4 x i32> @vmul_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
746 ; CHECK-LABEL: vmul_vx_nxv4i32_unmasked:
748 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
749 ; CHECK-NEXT: vmul.vx v8, v8, a0
751 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
752 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
753 %v = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
754 ret <vscale x 4 x i32> %v
; --- nxv7i32: a non-power-of-2 element count; the CHECK lines show it is
; lowered at e32, m4 (the containing power-of-2 register group).
757 declare <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32>, <vscale x 7 x i32>, <vscale x 7 x i1>, i32)
759 define <vscale x 7 x i32> @vmul_vv_nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
760 ; CHECK-LABEL: vmul_vv_nxv7i32:
762 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
763 ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
765 %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> %m, i32 %evl)
766 ret <vscale x 7 x i32> %v
769 define <vscale x 7 x i32> @vmul_vv_nxv7i32_unmasked(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, i32 zeroext %evl) {
770 ; CHECK-LABEL: vmul_vv_nxv7i32_unmasked:
772 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
773 ; CHECK-NEXT: vmul.vv v8, v8, v12
775 %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> splat (i1 true), i32 %evl)
776 ret <vscale x 7 x i32> %v
779 define <vscale x 7 x i32> @vmul_vx_nxv7i32(<vscale x 7 x i32> %va, i32 %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
780 ; CHECK-LABEL: vmul_vx_nxv7i32:
782 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
783 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
785 %elt.head = insertelement <vscale x 7 x i32> poison, i32 %b, i32 0
786 %vb = shufflevector <vscale x 7 x i32> %elt.head, <vscale x 7 x i32> poison, <vscale x 7 x i32> zeroinitializer
787 %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %vb, <vscale x 7 x i1> %m, i32 %evl)
788 ret <vscale x 7 x i32> %v
791 define <vscale x 7 x i32> @vmul_vx_nxv7i32_unmasked(<vscale x 7 x i32> %va, i32 %b, i32 zeroext %evl) {
792 ; CHECK-LABEL: vmul_vx_nxv7i32_unmasked:
794 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
795 ; CHECK-NEXT: vmul.vx v8, v8, a0
797 %elt.head = insertelement <vscale x 7 x i32> poison, i32 %b, i32 0
798 %vb = shufflevector <vscale x 7 x i32> %elt.head, <vscale x 7 x i32> poison, <vscale x 7 x i32> zeroinitializer
799 %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %vb, <vscale x 7 x i1> splat (i1 true), i32 %evl)
800 ret <vscale x 7 x i32> %v
; --- nxv8i32: same four test shapes at e32, m4.
803 declare <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
805 define <vscale x 8 x i32> @vmul_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
806 ; CHECK-LABEL: vmul_vv_nxv8i32:
808 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
809 ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
811 %v = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
812 ret <vscale x 8 x i32> %v
815 define <vscale x 8 x i32> @vmul_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
816 ; CHECK-LABEL: vmul_vv_nxv8i32_unmasked:
818 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
819 ; CHECK-NEXT: vmul.vv v8, v8, v12
821 %v = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
822 ret <vscale x 8 x i32> %v
825 define <vscale x 8 x i32> @vmul_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
826 ; CHECK-LABEL: vmul_vx_nxv8i32:
828 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
829 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
831 %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
832 %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
833 %v = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
834 ret <vscale x 8 x i32> %v
837 define <vscale x 8 x i32> @vmul_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
838 ; CHECK-LABEL: vmul_vx_nxv8i32_unmasked:
840 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
841 ; CHECK-NEXT: vmul.vx v8, v8, a0
843 %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
844 %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
845 %v = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
846 ret <vscale x 8 x i32> %v
; --- nxv16i32: e32, m8; also covers the commuted splat-first operand order.
849 declare <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
851 define <vscale x 16 x i32> @vmul_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
852 ; CHECK-LABEL: vmul_vv_nxv16i32:
854 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
855 ; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t
857 %v = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
858 ret <vscale x 16 x i32> %v
861 define <vscale x 16 x i32> @vmul_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
862 ; CHECK-LABEL: vmul_vv_nxv16i32_unmasked:
864 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
865 ; CHECK-NEXT: vmul.vv v8, v8, v16
867 %v = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
868 ret <vscale x 16 x i32> %v
871 define <vscale x 16 x i32> @vmul_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
872 ; CHECK-LABEL: vmul_vx_nxv16i32:
874 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
875 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
877 %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
878 %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
879 %v = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
880 ret <vscale x 16 x i32> %v
; Commuted: splat is the FIRST operand of the intrinsic; since mul commutes,
; the expected lowering is the same single vmul.vx.
883 define <vscale x 16 x i32> @vmul_vx_nxv16i32_commute(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
884 ; CHECK-LABEL: vmul_vx_nxv16i32_commute:
886 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
887 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
889 %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
890 %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
891 %v = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %vb, <vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
892 ret <vscale x 16 x i32> %v
895 define <vscale x 16 x i32> @vmul_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
896 ; CHECK-LABEL: vmul_vx_nxv16i32_unmasked:
898 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
899 ; CHECK-NEXT: vmul.vx v8, v8, a0
901 %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
902 %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
903 %v = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
904 ret <vscale x 16 x i32> %v
; --- nxv1i64: i64 element tests diverge per target. On RV32 the i64 scalar
; arrives in a register pair (a0/a1), so the splat is materialized by storing
; both halves to the stack and broadcasting with a strided load (vlse64.v
; stride 0), then vmul.vv. RV64 keeps the single vmul.vx form.
907 declare <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
909 define <vscale x 1 x i64> @vmul_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
910 ; CHECK-LABEL: vmul_vv_nxv1i64:
912 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
913 ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
915 %v = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
916 ret <vscale x 1 x i64> %v
919 define <vscale x 1 x i64> @vmul_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
920 ; CHECK-LABEL: vmul_vv_nxv1i64_unmasked:
922 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
923 ; CHECK-NEXT: vmul.vv v8, v8, v9
925 %v = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
926 ret <vscale x 1 x i64> %v
929 define <vscale x 1 x i64> @vmul_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
930 ; RV32-LABEL: vmul_vx_nxv1i64:
932 ; RV32-NEXT: addi sp, sp, -16
933 ; RV32-NEXT: .cfi_def_cfa_offset 16
934 ; RV32-NEXT: sw a0, 8(sp)
935 ; RV32-NEXT: sw a1, 12(sp)
936 ; RV32-NEXT: addi a0, sp, 8
937 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
938 ; RV32-NEXT: vlse64.v v9, (a0), zero
939 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
940 ; RV32-NEXT: vmul.vv v8, v8, v9, v0.t
941 ; RV32-NEXT: addi sp, sp, 16
942 ; RV32-NEXT: .cfi_def_cfa_offset 0
945 ; RV64-LABEL: vmul_vx_nxv1i64:
947 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
948 ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
950 %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
951 %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
952 %v = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
953 ret <vscale x 1 x i64> %v
956 define <vscale x 1 x i64> @vmul_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
957 ; RV32-LABEL: vmul_vx_nxv1i64_unmasked:
959 ; RV32-NEXT: addi sp, sp, -16
960 ; RV32-NEXT: .cfi_def_cfa_offset 16
961 ; RV32-NEXT: sw a0, 8(sp)
962 ; RV32-NEXT: sw a1, 12(sp)
963 ; RV32-NEXT: addi a0, sp, 8
964 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
965 ; RV32-NEXT: vlse64.v v9, (a0), zero
966 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
967 ; RV32-NEXT: vmul.vv v8, v8, v9
968 ; RV32-NEXT: addi sp, sp, 16
969 ; RV32-NEXT: .cfi_def_cfa_offset 0
972 ; RV64-LABEL: vmul_vx_nxv1i64_unmasked:
974 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
975 ; RV64-NEXT: vmul.vx v8, v8, a0
977 %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
978 %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
979 %v = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
980 ret <vscale x 1 x i64> %v
; --- nxv2i64: e64, m2; RV32 again splats the i64 scalar through the stack
; (sw/sw + zero-stride vlse64.v) before vmul.vv, RV64 uses vmul.vx directly.
983 declare <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
985 define <vscale x 2 x i64> @vmul_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
986 ; CHECK-LABEL: vmul_vv_nxv2i64:
988 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
989 ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t
991 %v = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
992 ret <vscale x 2 x i64> %v
995 define <vscale x 2 x i64> @vmul_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
996 ; CHECK-LABEL: vmul_vv_nxv2i64_unmasked:
998 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
999 ; CHECK-NEXT: vmul.vv v8, v8, v10
1001 %v = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1002 ret <vscale x 2 x i64> %v
1005 define <vscale x 2 x i64> @vmul_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1006 ; RV32-LABEL: vmul_vx_nxv2i64:
1008 ; RV32-NEXT: addi sp, sp, -16
1009 ; RV32-NEXT: .cfi_def_cfa_offset 16
1010 ; RV32-NEXT: sw a0, 8(sp)
1011 ; RV32-NEXT: sw a1, 12(sp)
1012 ; RV32-NEXT: addi a0, sp, 8
1013 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1014 ; RV32-NEXT: vlse64.v v10, (a0), zero
1015 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1016 ; RV32-NEXT: vmul.vv v8, v8, v10, v0.t
1017 ; RV32-NEXT: addi sp, sp, 16
1018 ; RV32-NEXT: .cfi_def_cfa_offset 0
1021 ; RV64-LABEL: vmul_vx_nxv2i64:
1023 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1024 ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
1026 %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
1027 %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1028 %v = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
1029 ret <vscale x 2 x i64> %v
1032 define <vscale x 2 x i64> @vmul_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
1033 ; RV32-LABEL: vmul_vx_nxv2i64_unmasked:
1035 ; RV32-NEXT: addi sp, sp, -16
1036 ; RV32-NEXT: .cfi_def_cfa_offset 16
1037 ; RV32-NEXT: sw a0, 8(sp)
1038 ; RV32-NEXT: sw a1, 12(sp)
1039 ; RV32-NEXT: addi a0, sp, 8
1040 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1041 ; RV32-NEXT: vlse64.v v10, (a0), zero
1042 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1043 ; RV32-NEXT: vmul.vv v8, v8, v10
1044 ; RV32-NEXT: addi sp, sp, 16
1045 ; RV32-NEXT: .cfi_def_cfa_offset 0
1048 ; RV64-LABEL: vmul_vx_nxv2i64_unmasked:
1050 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1051 ; RV64-NEXT: vmul.vx v8, v8, a0
1053 %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
1054 %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1055 %v = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1056 ret <vscale x 2 x i64> %v
; --- nxv4i64: e64, m4; same RV32 stack-splat / RV64 vmul.vx split as above.
1059 declare <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
1061 define <vscale x 4 x i64> @vmul_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1062 ; CHECK-LABEL: vmul_vv_nxv4i64:
1064 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1065 ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
1067 %v = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
1068 ret <vscale x 4 x i64> %v
1071 define <vscale x 4 x i64> @vmul_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
1072 ; CHECK-LABEL: vmul_vv_nxv4i64_unmasked:
1074 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1075 ; CHECK-NEXT: vmul.vv v8, v8, v12
1077 %v = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1078 ret <vscale x 4 x i64> %v
1081 define <vscale x 4 x i64> @vmul_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1082 ; RV32-LABEL: vmul_vx_nxv4i64:
1084 ; RV32-NEXT: addi sp, sp, -16
1085 ; RV32-NEXT: .cfi_def_cfa_offset 16
1086 ; RV32-NEXT: sw a0, 8(sp)
1087 ; RV32-NEXT: sw a1, 12(sp)
1088 ; RV32-NEXT: addi a0, sp, 8
1089 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1090 ; RV32-NEXT: vlse64.v v12, (a0), zero
1091 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1092 ; RV32-NEXT: vmul.vv v8, v8, v12, v0.t
1093 ; RV32-NEXT: addi sp, sp, 16
1094 ; RV32-NEXT: .cfi_def_cfa_offset 0
1097 ; RV64-LABEL: vmul_vx_nxv4i64:
1099 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1100 ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
1102 %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1103 %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1104 %v = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
1105 ret <vscale x 4 x i64> %v
1108 define <vscale x 4 x i64> @vmul_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
1109 ; RV32-LABEL: vmul_vx_nxv4i64_unmasked:
1111 ; RV32-NEXT: addi sp, sp, -16
1112 ; RV32-NEXT: .cfi_def_cfa_offset 16
1113 ; RV32-NEXT: sw a0, 8(sp)
1114 ; RV32-NEXT: sw a1, 12(sp)
1115 ; RV32-NEXT: addi a0, sp, 8
1116 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1117 ; RV32-NEXT: vlse64.v v12, (a0), zero
1118 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1119 ; RV32-NEXT: vmul.vv v8, v8, v12
1120 ; RV32-NEXT: addi sp, sp, 16
1121 ; RV32-NEXT: .cfi_def_cfa_offset 0
1124 ; RV64-LABEL: vmul_vx_nxv4i64_unmasked:
1126 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1127 ; RV64-NEXT: vmul.vx v8, v8, a0
1129 %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1130 %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1131 %v = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1132 ret <vscale x 4 x i64> %v
; --- nxv8i64: e64, m8; largest register group, second source in v16.
1135 declare <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
1137 define <vscale x 8 x i64> @vmul_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1138 ; CHECK-LABEL: vmul_vv_nxv8i64:
1140 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1141 ; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t
1143 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
1144 ret <vscale x 8 x i64> %v
1147 define <vscale x 8 x i64> @vmul_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
1148 ; CHECK-LABEL: vmul_vv_nxv8i64_unmasked:
1150 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1151 ; CHECK-NEXT: vmul.vv v8, v8, v16
1153 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1154 ret <vscale x 8 x i64> %v
1157 define <vscale x 8 x i64> @vmul_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1158 ; RV32-LABEL: vmul_vx_nxv8i64:
1160 ; RV32-NEXT: addi sp, sp, -16
1161 ; RV32-NEXT: .cfi_def_cfa_offset 16
1162 ; RV32-NEXT: sw a0, 8(sp)
1163 ; RV32-NEXT: sw a1, 12(sp)
1164 ; RV32-NEXT: addi a0, sp, 8
1165 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1166 ; RV32-NEXT: vlse64.v v16, (a0), zero
1167 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
1168 ; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
1169 ; RV32-NEXT: addi sp, sp, 16
1170 ; RV32-NEXT: .cfi_def_cfa_offset 0
1173 ; RV64-LABEL: vmul_vx_nxv8i64:
1175 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
1176 ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
1178 %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1179 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1180 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1181 ret <vscale x 8 x i64> %v
1184 define <vscale x 8 x i64> @vmul_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
1185 ; RV32-LABEL: vmul_vx_nxv8i64_unmasked:
1187 ; RV32-NEXT: addi sp, sp, -16
1188 ; RV32-NEXT: .cfi_def_cfa_offset 16
1189 ; RV32-NEXT: sw a0, 8(sp)
1190 ; RV32-NEXT: sw a1, 12(sp)
1191 ; RV32-NEXT: addi a0, sp, 8
1192 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1193 ; RV32-NEXT: vlse64.v v16, (a0), zero
1194 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
1195 ; RV32-NEXT: vmul.vv v8, v8, v16
1196 ; RV32-NEXT: addi sp, sp, 16
1197 ; RV32-NEXT: .cfi_def_cfa_offset 0
1200 ; RV64-LABEL: vmul_vx_nxv8i64_unmasked:
1202 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
1203 ; RV64-NEXT: vmul.vx v8, v8, a0
1205 %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1206 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1207 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1208 ret <vscale x 8 x i64> %v
; --- Constant-folding tests: mul by undef and mul by 0 both fold to an
; all-zero result, emitted as vmv.v.i v8, 0 with no multiply at all (the
; autogenerated CHECKs show a VLMAX vsetvli for the zero splat).
1211 define <vscale x 8 x i64> @vmul_vv_undef_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1212 ; CHECK-LABEL: vmul_vv_undef_nxv8i64:
1214 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1215 ; CHECK-NEXT: vmv.v.i v8, 0
1217 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> undef, <vscale x 8 x i1> %m, i32 %evl)
1218 ret <vscale x 8 x i64> %v
1221 define <vscale x 8 x i64> @vmul_vx_undef_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
1222 ; CHECK-LABEL: vmul_vx_undef_nxv8i64_unmasked:
1224 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1225 ; CHECK-NEXT: vmv.v.i v8, 0
1227 %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
1228 %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
1229 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> undef, <vscale x 8 x i1> %m, i32 %evl)
1230 ret <vscale x 8 x i64> %v
; Multiplying by a zero splat likewise folds to a zero splat.
1233 define <vscale x 8 x i64> @vmul_vx_zero_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1234 ; CHECK-LABEL: vmul_vx_zero_nxv8i64:
1236 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1237 ; CHECK-NEXT: vmv.v.i v8, 0
1239 %elt.head = insertelement <vscale x 8 x i64> poison, i64 0, i32 0
1240 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1241 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1242 ret <vscale x 8 x i64> %v
1245 define <vscale x 8 x i64> @vmul_vx_zero_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
1246 ; CHECK-LABEL: vmul_vx_zero_nxv8i64_unmasked:
1248 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1249 ; CHECK-NEXT: vmv.v.i v8, 0
1251 %elt.head = insertelement <vscale x 8 x i64> poison, i64 0, i32 0
1252 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1253 %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
1254 %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
1255 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1256 ret <vscale x 8 x i64> %v
; --- Identity tests: mul by 1 should vanish entirely (no vector ops in the
; autogenerated CHECKs for the *_one tests); mul by -1 becomes a negate,
; emitted as vrsub.vi v8, v8, 0 (0 - x).
1259 define <vscale x 8 x i64> @vmul_vx_one_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1260 ; CHECK-LABEL: vmul_vx_one_nxv8i64:
1263 %elt.head = insertelement <vscale x 8 x i64> poison, i64 1, i32 0
1264 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1265 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1266 ret <vscale x 8 x i64> %v
1269 define <vscale x 8 x i64> @vmul_vx_one_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
1270 ; CHECK-LABEL: vmul_vx_one_nxv8i64_unmasked:
1273 %elt.head = insertelement <vscale x 8 x i64> poison, i64 1, i32 0
1274 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1275 %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
1276 %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
1277 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1278 ret <vscale x 8 x i64> %v
1281 define <vscale x 8 x i64> @vmul_vx_negone_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1282 ; CHECK-LABEL: vmul_vx_negone_nxv8i64:
1284 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1285 ; CHECK-NEXT: vrsub.vi v8, v8, 0, v0.t
1287 %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
1288 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1289 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1290 ret <vscale x 8 x i64> %v
1293 define <vscale x 8 x i64> @vmul_vx_negone_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
1294 ; CHECK-LABEL: vmul_vx_negone_nxv8i64_unmasked:
1296 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1297 ; CHECK-NEXT: vrsub.vi v8, v8, 0
1299 %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
1300 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1301 %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
1302 %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
1303 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1304 ret <vscale x 8 x i64> %v
; --- Strength reduction: mul by 64 (= 2^6) becomes vsll.vi v8, v8, 6;
; mul by -64 becomes the shift followed by a negate (vrsub.vi ..., 0).
1307 define <vscale x 8 x i64> @vmul_vx_pow2_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1308 ; CHECK-LABEL: vmul_vx_pow2_nxv8i64:
1310 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1311 ; CHECK-NEXT: vsll.vi v8, v8, 6, v0.t
1313 %elt.head = insertelement <vscale x 8 x i64> poison, i64 64, i32 0
1314 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1315 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1316 ret <vscale x 8 x i64> %v
1319 define <vscale x 8 x i64> @vmul_vx_pow2_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
1320 ; CHECK-LABEL: vmul_vx_pow2_nxv8i64_unmasked:
1322 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1323 ; CHECK-NEXT: vsll.vi v8, v8, 6
1325 %elt.head = insertelement <vscale x 8 x i64> poison, i64 64, i32 0
1326 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1327 %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
1328 %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
1329 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1330 ret <vscale x 8 x i64> %v
1333 define <vscale x 8 x i64> @vmul_vx_negpow2_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1334 ; CHECK-LABEL: vmul_vx_negpow2_nxv8i64:
1336 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1337 ; CHECK-NEXT: vsll.vi v8, v8, 6, v0.t
1338 ; CHECK-NEXT: vrsub.vi v8, v8, 0, v0.t
1340 %elt.head = insertelement <vscale x 8 x i64> poison, i64 -64, i32 0
1341 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1342 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1343 ret <vscale x 8 x i64> %v
1346 define <vscale x 8 x i64> @vmul_vx_negpow2_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
1347 ; CHECK-LABEL: vmul_vx_negpow2_nxv8i64_unmasked:
1349 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1350 ; CHECK-NEXT: vsll.vi v8, v8, 6
1351 ; CHECK-NEXT: vrsub.vi v8, v8, 0
1353 %elt.head = insertelement <vscale x 8 x i64> poison, i64 -64, i32 0
1354 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1355 %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
1356 %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
1357 %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1358 ret <vscale x 8 x i64> %v
1361 declare <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
; (va << 3) * 7 with both operands splat constants folds to a single multiply
; by 56 (7 << 3). Note the recorded output uses a VLMAX vsetvli (a1, zero)
; and an unmasked vmul.vx even though the source op is masked with %m and
; bounded by %evl — this is what the autogenerated checks currently record.
define <vscale x 8 x i64> @vmul_vshl_vx_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vshl_vx_nxv8i64:
; CHECK-NEXT: li a0, 56
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0
%elt.head1 = insertelement <vscale x 8 x i64> poison, i64 3, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%elt.head2 = insertelement <vscale x 8 x i64> poison, i64 7, i32 0
%vc = shufflevector <vscale x 8 x i64> %elt.head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%vshl = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
%v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %vshl, <vscale x 8 x i64> %vc, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i64> %v
; Unmasked variant of the shl-then-mul splat fold: identical output to the
; masked variant — a single vmul.vx by 56 (7 << 3) at VLMAX.
define <vscale x 8 x i64> @vmul_vshl_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vshl_vx_nxv8i64_unmasked:
; CHECK-NEXT: li a0, 56
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0
%head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%elt.head1 = insertelement <vscale x 8 x i64> poison, i64 3, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%elt.head2 = insertelement <vscale x 8 x i64> poison, i64 7, i32 0
%vc = shufflevector <vscale x 8 x i64> %elt.head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%vshl = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
%v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %vshl, <vscale x 8 x i64> %vc, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i64> %v
; (va << 7) * vb is reassociated so the shift happens last: the recorded
; output multiplies first (vmul.vv) and then shifts by 7 (vsll.vi), both
; masked with v0.t under the %evl-bounded vsetvli.
define <vscale x 8 x i64> @vmul_vshl_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vshl_vv_nxv8i64:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 7, v0.t
%elt.head = insertelement <vscale x 8 x i64> poison, i64 7, i32 0
%vc = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%vshl = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vc, <vscale x 8 x i1> %m, i32 %evl)
%v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %vshl, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i64> %v
; Unmasked variant of the shl/mul reassociation: vmul.vv followed by
; vsll.vi 7, with no v0.t mask operands.
define <vscale x 8 x i64> @vmul_vshl_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vshl_vv_nxv8i64_unmasked:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vsll.vi v8, v8, 7
%head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%elt.head = insertelement <vscale x 8 x i64> poison, i64 7, i32 0
%vc = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%vshl = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vc, <vscale x 8 x i1> %m, i32 %evl)
%v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %vshl, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i64> %v
1427 declare <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
; (va + 3) * 7 is distributed to va * 7 + 21: the recorded masked output is a
; vmul.vx by 7 followed by a vadd.vx of 21 (two separate instructions; compare
; the unmasked variant below, which fuses into vmadd.vx).
define <vscale x 8 x i64> @vmul_vadd_vx_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vadd_vx_nxv8i64:
; CHECK-NEXT: li a1, 7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a1, v0.t
; CHECK-NEXT: li a0, 21
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
%elt.head1 = insertelement <vscale x 8 x i64> poison, i64 3, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%elt.head2 = insertelement <vscale x 8 x i64> poison, i64 7, i32 0
%vc = shufflevector <vscale x 8 x i64> %elt.head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%vadd = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
%v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %vadd, <vscale x 8 x i64> %vc, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i64> %v
; Unmasked variant of (va + 3) * 7 = va * 7 + 21: the recorded output
; materializes a splat of the addend 21 (vmv.v.x at VLMAX) and then uses the
; fused multiply-add vmadd.vx under the %evl-bounded vsetvli.
define <vscale x 8 x i64> @vmul_vadd_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vadd_vx_nxv8i64_unmasked:
; CHECK-NEXT: li a1, 21
; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: li a1, 7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmadd.vx v8, a1, v16
%head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%elt.head1 = insertelement <vscale x 8 x i64> poison, i64 3, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%elt.head2 = insertelement <vscale x 8 x i64> poison, i64 7, i32 0
%vc = shufflevector <vscale x 8 x i64> %elt.head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%vadd = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
%v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %vadd, <vscale x 8 x i64> %vc, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i64> %v