; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i64> @vwmul_vv_nxv1i64(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vwmul_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %vd = sext <vscale x 1 x i32> %vb to <vscale x 1 x i64>
  %ve = mul <vscale x 1 x i64> %vc, %vd
  ret <vscale x 1 x i64> %ve
}
define <vscale x 1 x i64> @vwmulu_vv_nxv1i64(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vwmulu.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
  %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %vd = zext <vscale x 1 x i32> %vb to <vscale x 1 x i64>
  %ve = mul <vscale x 1 x i64> %vc, %vd
  ret <vscale x 1 x i64> %ve
}
define <vscale x 1 x i64> @vwmulsu_vv_nxv1i64(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vwmulsu.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %vd = zext <vscale x 1 x i32> %vb to <vscale x 1 x i64>
  %ve = mul <vscale x 1 x i64> %vc, %vd
  ret <vscale x 1 x i64> %ve
}
define <vscale x 1 x i64> @vwmul_vx_nxv1i64(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vwmul_vx_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vwmul.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %vd = sext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
  %ve = mul <vscale x 1 x i64> %vc, %vd
  ret <vscale x 1 x i64> %ve
}
define <vscale x 1 x i64> @vwmulu_vx_nxv1i64(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vwmulu_vx_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vwmulu.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
  %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %vd = zext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
  %ve = mul <vscale x 1 x i64> %vc, %vd
  ret <vscale x 1 x i64> %ve
}
define <vscale x 1 x i64> @vwmulsu_vx_nxv1i64(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vwmulsu_vx_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vwmulsu.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %vd = zext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
  %ve = mul <vscale x 1 x i64> %vc, %vd
  ret <vscale x 1 x i64> %ve
}
define <vscale x 2 x i64> @vwmul_vv_nxv2i64(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vwmul_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %vd = sext <vscale x 2 x i32> %vb to <vscale x 2 x i64>
  %ve = mul <vscale x 2 x i64> %vc, %vd
  ret <vscale x 2 x i64> %ve
}
define <vscale x 2 x i64> @vwmulu_vv_nxv2i64(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vwmulu.vv v10, v8, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %vc = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %vd = zext <vscale x 2 x i32> %vb to <vscale x 2 x i64>
  %ve = mul <vscale x 2 x i64> %vc, %vd
  ret <vscale x 2 x i64> %ve
}
define <vscale x 2 x i64> @vwmulsu_vv_nxv2i64(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vwmulsu.vv v10, v8, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %vd = zext <vscale x 2 x i32> %vb to <vscale x 2 x i64>
  %ve = mul <vscale x 2 x i64> %vc, %vd
  ret <vscale x 2 x i64> %ve
}
define <vscale x 2 x i64> @vwmul_vx_nxv2i64(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vwmul_vx_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vwmul.vx v10, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
  %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %vd = sext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
  %ve = mul <vscale x 2 x i64> %vc, %vd
  ret <vscale x 2 x i64> %ve
}
define <vscale x 2 x i64> @vwmulu_vx_nxv2i64(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vwmulu_vx_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vwmulu.vx v10, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
  %vc = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %vd = zext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
  %ve = mul <vscale x 2 x i64> %vc, %vd
  ret <vscale x 2 x i64> %ve
}
define <vscale x 2 x i64> @vwmulsu_vx_nxv2i64(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vwmulsu_vx_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vwmulsu.vx v10, v8, a0
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
  %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %vd = zext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
  %ve = mul <vscale x 2 x i64> %vc, %vd
  ret <vscale x 2 x i64> %ve
}
define <vscale x 4 x i64> @vwmul_vv_nxv4i64(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vwmul_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vwmul.vv v12, v8, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %vd = sext <vscale x 4 x i32> %vb to <vscale x 4 x i64>
  %ve = mul <vscale x 4 x i64> %vc, %vd
  ret <vscale x 4 x i64> %ve
}
define <vscale x 4 x i64> @vwmulu_vv_nxv4i64(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vwmulu.vv v12, v8, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %vc = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %vd = zext <vscale x 4 x i32> %vb to <vscale x 4 x i64>
  %ve = mul <vscale x 4 x i64> %vc, %vd
  ret <vscale x 4 x i64> %ve
}
define <vscale x 4 x i64> @vwmulsu_vv_nxv4i64(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vwmulsu.vv v12, v8, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %vd = zext <vscale x 4 x i32> %vb to <vscale x 4 x i64>
  %ve = mul <vscale x 4 x i64> %vc, %vd
  ret <vscale x 4 x i64> %ve
}
define <vscale x 4 x i64> @vwmul_vx_nxv4i64(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vwmul_vx_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vwmul.vx v12, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %vd = sext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
  %ve = mul <vscale x 4 x i64> %vc, %vd
  ret <vscale x 4 x i64> %ve
}
define <vscale x 4 x i64> @vwmulu_vx_nxv4i64(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vwmulu_vx_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vwmulu.vx v12, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %vc = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %vd = zext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
  %ve = mul <vscale x 4 x i64> %vc, %vd
  ret <vscale x 4 x i64> %ve
}
define <vscale x 4 x i64> @vwmulsu_vx_nxv4i64(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vwmulsu_vx_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vwmulsu.vx v12, v8, a0
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %vd = zext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
  %ve = mul <vscale x 4 x i64> %vc, %vd
  ret <vscale x 4 x i64> %ve
}
define <vscale x 8 x i64> @vwmul_vv_nxv8i64(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vwmul_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vwmul.vv v16, v8, v12
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %vd = sext <vscale x 8 x i32> %vb to <vscale x 8 x i64>
  %ve = mul <vscale x 8 x i64> %vc, %vd
  ret <vscale x 8 x i64> %ve
}
define <vscale x 8 x i64> @vwmulu_vv_nxv8i64(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vwmulu.vv v16, v8, v12
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %vd = zext <vscale x 8 x i32> %vb to <vscale x 8 x i64>
  %ve = mul <vscale x 8 x i64> %vc, %vd
  ret <vscale x 8 x i64> %ve
}
define <vscale x 8 x i64> @vwmulsu_vv_nxv8i64(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %vd = zext <vscale x 8 x i32> %vb to <vscale x 8 x i64>
  %ve = mul <vscale x 8 x i64> %vc, %vd
  ret <vscale x 8 x i64> %ve
}
define <vscale x 8 x i64> @vwmul_vx_nxv8i64(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vwmul_vx_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vwmul.vx v16, v8, a0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
  %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %vd = sext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
  %ve = mul <vscale x 8 x i64> %vc, %vd
  ret <vscale x 8 x i64> %ve
}
define <vscale x 8 x i64> @vwmulu_vx_nxv8i64(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vwmulu_vx_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vwmulu.vx v16, v8, a0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
  %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %vd = zext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
  %ve = mul <vscale x 8 x i64> %vc, %vd
  ret <vscale x 8 x i64> %ve
}
define <vscale x 8 x i64> @vwmulsu_vx_nxv8i64(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vwmulsu_vx_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
  %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %vd = zext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
  %ve = mul <vscale x 8 x i64> %vc, %vd
  ret <vscale x 8 x i64> %ve
}