; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
declare <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.b(<8 x i16>, <16 x i8>, <16 x i8>)

define <8 x i16> @lsx_vmaddwev_h_b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwev_h_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwev.h.b $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc)
  ret <8 x i16> %res
}
declare <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.h(<4 x i32>, <8 x i16>, <8 x i16>)

define <4 x i32> @lsx_vmaddwev_w_h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwev_w_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwev.w.h $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc)
  ret <4 x i32> %res
}
declare <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.w(<2 x i64>, <4 x i32>, <4 x i32>)

define <2 x i64> @lsx_vmaddwev_d_w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwev_d_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwev.d.w $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc)
  ret <2 x i64> %res
}
declare <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.d(<2 x i64>, <2 x i64>, <2 x i64>)

define <2 x i64> @lsx_vmaddwev_q_d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwev_q_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwev.q.d $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc)
  ret <2 x i64> %res
}
declare <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.bu(<8 x i16>, <16 x i8>, <16 x i8>)

define <8 x i16> @lsx_vmaddwev_h_bu(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwev_h_bu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwev.h.bu $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.bu(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc)
  ret <8 x i16> %res
}
declare <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.hu(<4 x i32>, <8 x i16>, <8 x i16>)

define <4 x i32> @lsx_vmaddwev_w_hu(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwev_w_hu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwev.w.hu $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.hu(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc)
  ret <4 x i32> %res
}
declare <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.wu(<2 x i64>, <4 x i32>, <4 x i32>)

define <2 x i64> @lsx_vmaddwev_d_wu(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwev_d_wu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwev.d.wu $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.wu(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc)
  ret <2 x i64> %res
}
declare <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.du(<2 x i64>, <2 x i64>, <2 x i64>)

define <2 x i64> @lsx_vmaddwev_q_du(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwev_q_du:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwev.q.du $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.du(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc)
  ret <2 x i64> %res
}
declare <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.bu.b(<8 x i16>, <16 x i8>, <16 x i8>)

define <8 x i16> @lsx_vmaddwev_h_bu_b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwev_h_bu_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwev.h.bu.b $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.bu.b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc)
  ret <8 x i16> %res
}
declare <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.hu.h(<4 x i32>, <8 x i16>, <8 x i16>)

define <4 x i32> @lsx_vmaddwev_w_hu_h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwev_w_hu_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwev.w.hu.h $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.hu.h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc)
  ret <4 x i32> %res
}
declare <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.wu.w(<2 x i64>, <4 x i32>, <4 x i32>)

define <2 x i64> @lsx_vmaddwev_d_wu_w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwev_d_wu_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwev.d.wu.w $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.wu.w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc)
  ret <2 x i64> %res
}
declare <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.du.d(<2 x i64>, <2 x i64>, <2 x i64>)

define <2 x i64> @lsx_vmaddwev_q_du_d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwev_q_du_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwev.q.du.d $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.du.d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc)
  ret <2 x i64> %res
}
declare <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.b(<8 x i16>, <16 x i8>, <16 x i8>)

define <8 x i16> @lsx_vmaddwod_h_b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwod_h_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwod.h.b $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc)
  ret <8 x i16> %res
}
declare <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.h(<4 x i32>, <8 x i16>, <8 x i16>)

define <4 x i32> @lsx_vmaddwod_w_h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwod_w_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwod.w.h $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc)
  ret <4 x i32> %res
}
declare <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.w(<2 x i64>, <4 x i32>, <4 x i32>)

define <2 x i64> @lsx_vmaddwod_d_w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwod_d_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwod.d.w $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc)
  ret <2 x i64> %res
}
declare <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.d(<2 x i64>, <2 x i64>, <2 x i64>)

define <2 x i64> @lsx_vmaddwod_q_d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwod_q_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwod.q.d $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc)
  ret <2 x i64> %res
}
declare <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.bu(<8 x i16>, <16 x i8>, <16 x i8>)

define <8 x i16> @lsx_vmaddwod_h_bu(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwod_h_bu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwod.h.bu $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.bu(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc)
  ret <8 x i16> %res
}
declare <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.hu(<4 x i32>, <8 x i16>, <8 x i16>)

define <4 x i32> @lsx_vmaddwod_w_hu(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwod_w_hu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwod.w.hu $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.hu(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc)
  ret <4 x i32> %res
}
declare <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.wu(<2 x i64>, <4 x i32>, <4 x i32>)

define <2 x i64> @lsx_vmaddwod_d_wu(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwod_d_wu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwod.d.wu $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.wu(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc)
  ret <2 x i64> %res
}
declare <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.du(<2 x i64>, <2 x i64>, <2 x i64>)

define <2 x i64> @lsx_vmaddwod_q_du(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwod_q_du:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwod.q.du $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.du(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc)
  ret <2 x i64> %res
}
declare <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.bu.b(<8 x i16>, <16 x i8>, <16 x i8>)

define <8 x i16> @lsx_vmaddwod_h_bu_b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwod_h_bu_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwod.h.bu.b $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.bu.b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc)
  ret <8 x i16> %res
}
declare <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.hu.h(<4 x i32>, <8 x i16>, <8 x i16>)

define <4 x i32> @lsx_vmaddwod_w_hu_h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwod_w_hu_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwod.w.hu.h $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.hu.h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc)
  ret <4 x i32> %res
}
declare <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.wu.w(<2 x i64>, <4 x i32>, <4 x i32>)

define <2 x i64> @lsx_vmaddwod_d_wu_w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwod_d_wu_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwod.d.wu.w $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.wu.w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc)
  ret <2 x i64> %res
}
declare <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.du.d(<2 x i64>, <2 x i64>, <2 x i64>)

define <2 x i64> @lsx_vmaddwod_q_du_d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc) nounwind {
; CHECK-LABEL: lsx_vmaddwod_q_du_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmaddwod.q.du.d $vr0, $vr1, $vr2
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.du.d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc)
  ret <2 x i64> %res
}