; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+f,+d,+zvfh,+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+f,+d,+zvfh,+v \
; RUN:   -verify-machineinstrs | FileCheck %s
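
; Check that the backend can commute the source operands of these RVV
; intrinsics: each pair of calls below differs only in source-operand order.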
declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vadd_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vadd_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vand_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vand_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vand_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vand_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vand.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vor_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vor_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vor.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vor_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vor_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vor.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vxor_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vxor_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vxor_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vxor_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vxor.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i1> @commutable_vmseq_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmseq_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmseq.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(<vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i1> %a, %b
  ret <vscale x 1 x i1> %ret
}

declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(<vscale x 1 x i1>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen);
define <vscale x 1 x i1> @commutable_vmseq_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmseq_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmseq.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmseq.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2)
  %b = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2)
  %ret = add <vscale x 1 x i1> %a, %b
  ret <vscale x 1 x i1> %ret
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i1> @commutable_vmsne_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmsne_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmsne.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(<vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i1> %a, %b
  ret <vscale x 1 x i1> %ret
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(<vscale x 1 x i1>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen);
define <vscale x 1 x i1> @commutable_vmsne_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmsne_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmsne.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmsne.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2)
  %b = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2)
  %ret = add <vscale x 1 x i1> %a, %b
  ret <vscale x 1 x i1> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vmin_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmin_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vmin_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmin_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmin.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vminu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vminu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vminu.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vminu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vminu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vminu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vmax_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmax_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vmax_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmax_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmax.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vmaxu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmaxu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vmaxu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmaxu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmaxu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vmul_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmul_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vmul_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmul_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmul.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vmulh_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmulh_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmulh.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vmulh_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmulh_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmulh.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmulh.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vmulhu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmulhu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmulhu.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vmulhu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmulhu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmulhu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen);
define <vscale x 1 x i64> @commutable_vwadd_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vwadd_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwadd.vv v10, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwadd_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vwadd_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vwadd.vv v11, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen);
define <vscale x 1 x i64> @commutable_vwaddu_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vwaddu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwaddu_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vwaddu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vwaddu.vv v11, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen);
define <vscale x 1 x i64> @commutable_vwmul_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vwmul_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwmul_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vwmul_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vwmul.vv v11, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen);
define <vscale x 1 x i64> @commutable_vwmulu_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vwmulu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmulu.vv v10, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwmulu_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vwmulu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmulu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vwmulu.vv v11, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwmacc_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vwmacc_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmacc.vv v10, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwmacc_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vwmacc_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmacc.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vwmacc.vv v11, v9, v8, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwmaccu_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vwmaccu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmaccu.vv v10, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwmaccu_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vwmaccu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmaccu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vwmaccu.vv v11, v9, v8, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen);
define <vscale x 1 x i64> @commutable_vadc_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vadc_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vadc.vvm v10, v8, v9, v0
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vsadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vsadd_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vsadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vsadd_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vsadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vsaddu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vsaddu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsaddu.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vsaddu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vsaddu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsaddu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vsaddu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vaadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vaadd_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vaadd.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vaadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vaadd_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vaadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vaadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vaaddu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vaaddu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vaaddu.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vaaddu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vaaddu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vaaddu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vaaddu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vsmul_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vsmul_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vsmul_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vsmul_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsmul.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vsmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}