// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple loongarch64 -target-feature +lsx -O2 -emit-llvm %s -o - | FileCheck %s
typedef signed char v16i8 __attribute__ ((vector_size(16), aligned(16)));
typedef signed char v16i8_b __attribute__ ((vector_size(16), aligned(1)));
typedef unsigned char v16u8 __attribute__ ((vector_size(16), aligned(16)));
typedef unsigned char v16u8_b __attribute__ ((vector_size(16), aligned(1)));
typedef short v8i16 __attribute__ ((vector_size(16), aligned(16)));
typedef short v8i16_h __attribute__ ((vector_size(16), aligned(2)));
typedef unsigned short v8u16 __attribute__ ((vector_size(16), aligned(16)));
typedef unsigned short v8u16_h __attribute__ ((vector_size(16), aligned(2)));
typedef int v4i32 __attribute__ ((vector_size(16), aligned(16)));
typedef int v4i32_w __attribute__ ((vector_size(16), aligned(4)));
typedef unsigned int v4u32 __attribute__ ((vector_size(16), aligned(16)));
typedef unsigned int v4u32_w __attribute__ ((vector_size(16), aligned(4)));
typedef long long v2i64 __attribute__ ((vector_size(16), aligned(16)));
typedef long long v2i64_d __attribute__ ((vector_size(16), aligned(8)));
typedef unsigned long long v2u64 __attribute__ ((vector_size(16), aligned(16)));
typedef unsigned long long v2u64_d __attribute__ ((vector_size(16), aligned(8)));
typedef float v4f32 __attribute__ ((vector_size(16), aligned(16)));
typedef float v4f32_w __attribute__ ((vector_size(16), aligned(4)));
typedef double v2f64 __attribute__ ((vector_size(16), aligned(16)));
typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8)));

typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));
typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
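// Illustrative usage sketch (not part of the autogenerated assertions below;
// the helper name is made up for illustration only): the typedefs above are
// ordinary Clang vector types, so values of these types can be passed straight
// to the __builtin_lsx_* intrinsics exercised by this test, e.g.:
static inline v16i8 example_vadd_b(v16i8 a, v16i8 b) {
  // Lane-wise 8-bit addition via the LSX builtin used later in this file.
  return __builtin_lsx_vadd_b(a, b);
}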
// CHECK-LABEL: @vsll_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsll.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsll_b(v16i8 _1, v16i8 _2) { return __builtin_lsx_vsll_b(_1, _2); }
// CHECK-LABEL: @vsll_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsll.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsll_h(v8i16 _1, v8i16 _2) { return __builtin_lsx_vsll_h(_1, _2); }
// CHECK-LABEL: @vsll_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsll.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsll_w(v4i32 _1, v4i32 _2) { return __builtin_lsx_vsll_w(_1, _2); }
// CHECK-LABEL: @vsll_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsll.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsll_d(v2i64 _1, v2i64 _2) { return __builtin_lsx_vsll_d(_1, _2); }
// CHECK-LABEL: @vslli_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vslli.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vslli_b(v16i8 _1) { return __builtin_lsx_vslli_b(_1, 1); }
// CHECK-LABEL: @vslli_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vslli.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vslli_h(v8i16 _1) { return __builtin_lsx_vslli_h(_1, 1); }
// CHECK-LABEL: @vslli_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vslli.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vslli_w(v4i32 _1) { return __builtin_lsx_vslli_w(_1, 1); }
// CHECK-LABEL: @vslli_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vslli.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vslli_d(v2i64 _1) { return __builtin_lsx_vslli_d(_1, 1); }
// CHECK-LABEL: @vsra_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsra.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsra_b(v16i8 _1, v16i8 _2) { return __builtin_lsx_vsra_b(_1, _2); }
// CHECK-LABEL: @vsra_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsra.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsra_h(v8i16 _1, v8i16 _2) { return __builtin_lsx_vsra_h(_1, _2); }
// CHECK-LABEL: @vsra_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsra.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsra_w(v4i32 _1, v4i32 _2) { return __builtin_lsx_vsra_w(_1, _2); }
// CHECK-LABEL: @vsra_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsra.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsra_d(v2i64 _1, v2i64 _2) { return __builtin_lsx_vsra_d(_1, _2); }
// CHECK-LABEL: @vsrai_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrai.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsrai_b(v16i8 _1) { return __builtin_lsx_vsrai_b(_1, 1); }
// CHECK-LABEL: @vsrai_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrai.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsrai_h(v8i16 _1) { return __builtin_lsx_vsrai_h(_1, 1); }
// CHECK-LABEL: @vsrai_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrai.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsrai_w(v4i32 _1) { return __builtin_lsx_vsrai_w(_1, 1); }
// CHECK-LABEL: @vsrai_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrai.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsrai_d(v2i64 _1) { return __builtin_lsx_vsrai_d(_1, 1); }
// CHECK-LABEL: @vsrar_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrar.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsrar_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vsrar_b(_1, _2);
}
// CHECK-LABEL: @vsrar_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrar.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsrar_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vsrar_h(_1, _2);
}
// CHECK-LABEL: @vsrar_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrar.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsrar_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vsrar_w(_1, _2);
}
// CHECK-LABEL: @vsrar_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrar.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsrar_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vsrar_d(_1, _2);
}
// CHECK-LABEL: @vsrari_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrari.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsrari_b(v16i8 _1) { return __builtin_lsx_vsrari_b(_1, 1); }
// CHECK-LABEL: @vsrari_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrari.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsrari_h(v8i16 _1) { return __builtin_lsx_vsrari_h(_1, 1); }
// CHECK-LABEL: @vsrari_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrari.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsrari_w(v4i32 _1) { return __builtin_lsx_vsrari_w(_1, 1); }
// CHECK-LABEL: @vsrari_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrari.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsrari_d(v2i64 _1) { return __builtin_lsx_vsrari_d(_1, 1); }
// CHECK-LABEL: @vsrl_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrl.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsrl_b(v16i8 _1, v16i8 _2) { return __builtin_lsx_vsrl_b(_1, _2); }
// CHECK-LABEL: @vsrl_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrl.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsrl_h(v8i16 _1, v8i16 _2) { return __builtin_lsx_vsrl_h(_1, _2); }
// CHECK-LABEL: @vsrl_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrl.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsrl_w(v4i32 _1, v4i32 _2) { return __builtin_lsx_vsrl_w(_1, _2); }
// CHECK-LABEL: @vsrl_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrl.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsrl_d(v2i64 _1, v2i64 _2) { return __builtin_lsx_vsrl_d(_1, _2); }
// CHECK-LABEL: @vsrli_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrli.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsrli_b(v16i8 _1) { return __builtin_lsx_vsrli_b(_1, 1); }
// CHECK-LABEL: @vsrli_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrli.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsrli_h(v8i16 _1) { return __builtin_lsx_vsrli_h(_1, 1); }
// CHECK-LABEL: @vsrli_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrli.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsrli_w(v4i32 _1) { return __builtin_lsx_vsrli_w(_1, 1); }
// CHECK-LABEL: @vsrli_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrli.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsrli_d(v2i64 _1) { return __builtin_lsx_vsrli_d(_1, 1); }
// CHECK-LABEL: @vsrlr_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrlr.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsrlr_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vsrlr_b(_1, _2);
}
// CHECK-LABEL: @vsrlr_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrlr.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsrlr_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vsrlr_h(_1, _2);
}
// CHECK-LABEL: @vsrlr_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrlr.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsrlr_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vsrlr_w(_1, _2);
}
// CHECK-LABEL: @vsrlr_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrlr.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsrlr_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vsrlr_d(_1, _2);
}
// CHECK-LABEL: @vsrlri_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrlri.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsrlri_b(v16i8 _1) { return __builtin_lsx_vsrlri_b(_1, 1); }
// CHECK-LABEL: @vsrlri_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrlri.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsrlri_h(v8i16 _1) { return __builtin_lsx_vsrlri_h(_1, 1); }
// CHECK-LABEL: @vsrlri_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrlri.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsrlri_w(v4i32 _1) { return __builtin_lsx_vsrlri_w(_1, 1); }
// CHECK-LABEL: @vsrlri_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrlri.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsrlri_d(v2i64 _1) { return __builtin_lsx_vsrlri_d(_1, 1); }
// CHECK-LABEL: @vbitclr_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitclr.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vbitclr_b(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vbitclr_b(_1, _2);
}
// CHECK-LABEL: @vbitclr_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vbitclr.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vbitclr_h(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vbitclr_h(_1, _2);
}
// CHECK-LABEL: @vbitclr_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vbitclr.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vbitclr_w(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vbitclr_w(_1, _2);
}
// CHECK-LABEL: @vbitclr_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vbitclr.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vbitclr_d(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vbitclr_d(_1, _2);
}
// CHECK-LABEL: @vbitclri_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitclri.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vbitclri_b(v16u8 _1) { return __builtin_lsx_vbitclri_b(_1, 1); }
// CHECK-LABEL: @vbitclri_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vbitclri.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vbitclri_h(v8u16 _1) { return __builtin_lsx_vbitclri_h(_1, 1); }
// CHECK-LABEL: @vbitclri_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vbitclri.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vbitclri_w(v4u32 _1) { return __builtin_lsx_vbitclri_w(_1, 1); }
// CHECK-LABEL: @vbitclri_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vbitclri.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vbitclri_d(v2u64 _1) { return __builtin_lsx_vbitclri_d(_1, 1); }
// CHECK-LABEL: @vbitset_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitset.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vbitset_b(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vbitset_b(_1, _2);
}
// CHECK-LABEL: @vbitset_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vbitset.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vbitset_h(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vbitset_h(_1, _2);
}
// CHECK-LABEL: @vbitset_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vbitset.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vbitset_w(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vbitset_w(_1, _2);
}
// CHECK-LABEL: @vbitset_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vbitset.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vbitset_d(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vbitset_d(_1, _2);
}
// CHECK-LABEL: @vbitseti_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitseti.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vbitseti_b(v16u8 _1) { return __builtin_lsx_vbitseti_b(_1, 1); }
// CHECK-LABEL: @vbitseti_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vbitseti.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vbitseti_h(v8u16 _1) { return __builtin_lsx_vbitseti_h(_1, 1); }
// CHECK-LABEL: @vbitseti_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vbitseti.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vbitseti_w(v4u32 _1) { return __builtin_lsx_vbitseti_w(_1, 1); }
// CHECK-LABEL: @vbitseti_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vbitseti.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vbitseti_d(v2u64 _1) { return __builtin_lsx_vbitseti_d(_1, 1); }
// CHECK-LABEL: @vbitrev_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitrev.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vbitrev_b(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vbitrev_b(_1, _2);
}
// CHECK-LABEL: @vbitrev_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vbitrev.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vbitrev_h(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vbitrev_h(_1, _2);
}
// CHECK-LABEL: @vbitrev_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vbitrev.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vbitrev_w(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vbitrev_w(_1, _2);
}
// CHECK-LABEL: @vbitrev_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vbitrev.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vbitrev_d(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vbitrev_d(_1, _2);
}
// CHECK-LABEL: @vbitrevi_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitrevi.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vbitrevi_b(v16u8 _1) { return __builtin_lsx_vbitrevi_b(_1, 1); }
// CHECK-LABEL: @vbitrevi_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vbitrevi.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vbitrevi_h(v8u16 _1) { return __builtin_lsx_vbitrevi_h(_1, 1); }
// CHECK-LABEL: @vbitrevi_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vbitrevi.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vbitrevi_w(v4u32 _1) { return __builtin_lsx_vbitrevi_w(_1, 1); }
// CHECK-LABEL: @vbitrevi_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vbitrevi.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vbitrevi_d(v2u64 _1) { return __builtin_lsx_vbitrevi_d(_1, 1); }
// CHECK-LABEL: @vadd_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vadd.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vadd_b(v16i8 _1, v16i8 _2) { return __builtin_lsx_vadd_b(_1, _2); }
// CHECK-LABEL: @vadd_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vadd.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vadd_h(v8i16 _1, v8i16 _2) { return __builtin_lsx_vadd_h(_1, _2); }
// CHECK-LABEL: @vadd_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vadd.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vadd_w(v4i32 _1, v4i32 _2) { return __builtin_lsx_vadd_w(_1, _2); }
// CHECK-LABEL: @vadd_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vadd.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vadd_d(v2i64 _1, v2i64 _2) { return __builtin_lsx_vadd_d(_1, _2); }
// CHECK-LABEL: @vaddi_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vaddi.bu(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vaddi_bu(v16i8 _1) { return __builtin_lsx_vaddi_bu(_1, 1); }
// CHECK-LABEL: @vaddi_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vaddi.hu(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vaddi_hu(v8i16 _1) { return __builtin_lsx_vaddi_hu(_1, 1); }
// CHECK-LABEL: @vaddi_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vaddi.wu(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vaddi_wu(v4i32 _1) { return __builtin_lsx_vaddi_wu(_1, 1); }
// CHECK-LABEL: @vaddi_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddi.du(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vaddi_du(v2i64 _1) { return __builtin_lsx_vaddi_du(_1, 1); }
// CHECK-LABEL: @vsub_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsub.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsub_b(v16i8 _1, v16i8 _2) { return __builtin_lsx_vsub_b(_1, _2); }
// CHECK-LABEL: @vsub_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsub.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsub_h(v8i16 _1, v8i16 _2) { return __builtin_lsx_vsub_h(_1, _2); }
// CHECK-LABEL: @vsub_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsub.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsub_w(v4i32 _1, v4i32 _2) { return __builtin_lsx_vsub_w(_1, _2); }
// CHECK-LABEL: @vsub_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsub.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsub_d(v2i64 _1, v2i64 _2) { return __builtin_lsx_vsub_d(_1, _2); }
// CHECK-LABEL: @vsubi_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsubi.bu(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsubi_bu(v16i8 _1) { return __builtin_lsx_vsubi_bu(_1, 1); }
// CHECK-LABEL: @vsubi_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsubi.hu(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsubi_hu(v8i16 _1) { return __builtin_lsx_vsubi_hu(_1, 1); }
// CHECK-LABEL: @vsubi_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsubi.wu(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsubi_wu(v4i32 _1) { return __builtin_lsx_vsubi_wu(_1, 1); }
// CHECK-LABEL: @vsubi_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubi.du(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsubi_du(v2i64 _1) { return __builtin_lsx_vsubi_du(_1, 1); }
// CHECK-LABEL: @vmax_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmax.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vmax_b(v16i8 _1, v16i8 _2) { return __builtin_lsx_vmax_b(_1, _2); }
// CHECK-LABEL: @vmax_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmax.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmax_h(v8i16 _1, v8i16 _2) { return __builtin_lsx_vmax_h(_1, _2); }
// CHECK-LABEL: @vmax_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmax.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmax_w(v4i32 _1, v4i32 _2) { return __builtin_lsx_vmax_w(_1, _2); }
// CHECK-LABEL: @vmax_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmax.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmax_d(v2i64 _1, v2i64 _2) { return __builtin_lsx_vmax_d(_1, _2); }
// CHECK-LABEL: @vmaxi_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmaxi.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vmaxi_b(v16i8 _1) { return __builtin_lsx_vmaxi_b(_1, 1); }
// CHECK-LABEL: @vmaxi_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaxi.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmaxi_h(v8i16 _1) { return __builtin_lsx_vmaxi_h(_1, 1); }
// CHECK-LABEL: @vmaxi_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaxi.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmaxi_w(v4i32 _1) { return __builtin_lsx_vmaxi_w(_1, 1); }
// CHECK-LABEL: @vmaxi_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaxi.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmaxi_d(v2i64 _1) { return __builtin_lsx_vmaxi_d(_1, 1); }
// CHECK-LABEL: @vmax_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmax.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vmax_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vmax_bu(_1, _2);
}
// CHECK-LABEL: @vmax_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmax.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vmax_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vmax_hu(_1, _2);
}
// CHECK-LABEL: @vmax_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmax.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vmax_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vmax_wu(_1, _2);
}
// CHECK-LABEL: @vmax_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmax.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vmax_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vmax_du(_1, _2);
}
// CHECK-LABEL: @vmaxi_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmaxi.bu(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vmaxi_bu(v16u8 _1) { return __builtin_lsx_vmaxi_bu(_1, 1); }
// CHECK-LABEL: @vmaxi_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaxi.hu(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vmaxi_hu(v8u16 _1) { return __builtin_lsx_vmaxi_hu(_1, 1); }
// CHECK-LABEL: @vmaxi_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaxi.wu(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vmaxi_wu(v4u32 _1) { return __builtin_lsx_vmaxi_wu(_1, 1); }
// CHECK-LABEL: @vmaxi_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaxi.du(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vmaxi_du(v2u64 _1) { return __builtin_lsx_vmaxi_du(_1, 1); }
// CHECK-LABEL: @vmin_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmin.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vmin_b(v16i8 _1, v16i8 _2) { return __builtin_lsx_vmin_b(_1, _2); }
// CHECK-LABEL: @vmin_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmin.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmin_h(v8i16 _1, v8i16 _2) { return __builtin_lsx_vmin_h(_1, _2); }
// CHECK-LABEL: @vmin_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmin.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmin_w(v4i32 _1, v4i32 _2) { return __builtin_lsx_vmin_w(_1, _2); }
// CHECK-LABEL: @vmin_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmin.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmin_d(v2i64 _1, v2i64 _2) { return __builtin_lsx_vmin_d(_1, _2); }
// CHECK-LABEL: @vmini_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmini.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vmini_b(v16i8 _1) { return __builtin_lsx_vmini_b(_1, 1); }
// CHECK-LABEL: @vmini_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmini.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmini_h(v8i16 _1) { return __builtin_lsx_vmini_h(_1, 1); }
// CHECK-LABEL: @vmini_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmini.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmini_w(v4i32 _1) { return __builtin_lsx_vmini_w(_1, 1); }
// CHECK-LABEL: @vmini_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmini.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmini_d(v2i64 _1) { return __builtin_lsx_vmini_d(_1, 1); }
// CHECK-LABEL: @vmin_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmin.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vmin_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vmin_bu(_1, _2);
}
// CHECK-LABEL: @vmin_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmin.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vmin_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vmin_hu(_1, _2);
}
// CHECK-LABEL: @vmin_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmin.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vmin_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vmin_wu(_1, _2);
}
// CHECK-LABEL: @vmin_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmin.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vmin_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vmin_du(_1, _2);
}
// CHECK-LABEL: @vmini_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmini.bu(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vmini_bu(v16u8 _1) { return __builtin_lsx_vmini_bu(_1, 1); }
// CHECK-LABEL: @vmini_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmini.hu(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vmini_hu(v8u16 _1) { return __builtin_lsx_vmini_hu(_1, 1); }
// CHECK-LABEL: @vmini_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmini.wu(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vmini_wu(v4u32 _1) { return __builtin_lsx_vmini_wu(_1, 1); }
// CHECK-LABEL: @vmini_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmini.du(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vmini_du(v2u64 _1) { return __builtin_lsx_vmini_du(_1, 1); }
// CHECK-LABEL: @vseq_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vseq.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vseq_b(v16i8 _1, v16i8 _2) { return __builtin_lsx_vseq_b(_1, _2); }
// CHECK-LABEL: @vseq_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vseq.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vseq_h(v8i16 _1, v8i16 _2) { return __builtin_lsx_vseq_h(_1, _2); }
// CHECK-LABEL: @vseq_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vseq.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vseq_w(v4i32 _1, v4i32 _2) { return __builtin_lsx_vseq_w(_1, _2); }
// CHECK-LABEL: @vseq_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vseq.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vseq_d(v2i64 _1, v2i64 _2) { return __builtin_lsx_vseq_d(_1, _2); }
// CHECK-LABEL: @vseqi_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vseqi.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vseqi_b(v16i8 _1) { return __builtin_lsx_vseqi_b(_1, 1); }
// CHECK-LABEL: @vseqi_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vseqi.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vseqi_h(v8i16 _1) { return __builtin_lsx_vseqi_h(_1, 1); }
// CHECK-LABEL: @vseqi_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vseqi.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vseqi_w(v4i32 _1) { return __builtin_lsx_vseqi_w(_1, 1); }
// CHECK-LABEL: @vseqi_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vseqi.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vseqi_d(v2i64 _1) { return __builtin_lsx_vseqi_d(_1, 1); }
// CHECK-LABEL: @vslti_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vslti.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vslti_b(v16i8 _1) { return __builtin_lsx_vslti_b(_1, 1); }
// CHECK-LABEL: @vslt_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vslt.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vslt_b(v16i8 _1, v16i8 _2) { return __builtin_lsx_vslt_b(_1, _2); }
// CHECK-LABEL: @vslt_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vslt.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vslt_h(v8i16 _1, v8i16 _2) { return __builtin_lsx_vslt_h(_1, _2); }
// CHECK-LABEL: @vslt_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vslt.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vslt_w(v4i32 _1, v4i32 _2) { return __builtin_lsx_vslt_w(_1, _2); }
// CHECK-LABEL: @vslt_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vslt.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vslt_d(v2i64 _1, v2i64 _2) { return __builtin_lsx_vslt_d(_1, _2); }
// CHECK-LABEL: @vslti_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vslti.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vslti_h(v8i16 _1) { return __builtin_lsx_vslti_h(_1, 1); }
// CHECK-LABEL: @vslti_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vslti.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vslti_w(v4i32 _1) { return __builtin_lsx_vslti_w(_1, 1); }
// CHECK-LABEL: @vslti_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vslti.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vslti_d(v2i64 _1) { return __builtin_lsx_vslti_d(_1, 1); }
// CHECK-LABEL: @vslt_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vslt.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vslt_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vslt_bu(_1, _2);
}
// CHECK-LABEL: @vslt_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vslt.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vslt_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vslt_hu(_1, _2);
}
// CHECK-LABEL: @vslt_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vslt.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vslt_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vslt_wu(_1, _2);
}
// CHECK-LABEL: @vslt_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vslt.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vslt_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vslt_du(_1, _2);
}
// CHECK-LABEL: @vslti_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vslti.bu(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vslti_bu(v16u8 _1) { return __builtin_lsx_vslti_bu(_1, 1); }
// CHECK-LABEL: @vslti_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vslti.hu(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vslti_hu(v8u16 _1) { return __builtin_lsx_vslti_hu(_1, 1); }
// CHECK-LABEL: @vslti_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vslti.wu(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vslti_wu(v4u32 _1) { return __builtin_lsx_vslti_wu(_1, 1); }
// CHECK-LABEL: @vslti_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vslti.du(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vslti_du(v2u64 _1) { return __builtin_lsx_vslti_du(_1, 1); }
// CHECK-LABEL: @vsle_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsle.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsle_b(v16i8 _1, v16i8 _2) { return __builtin_lsx_vsle_b(_1, _2); }
// CHECK-LABEL: @vsle_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsle.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsle_h(v8i16 _1, v8i16 _2) { return __builtin_lsx_vsle_h(_1, _2); }
// CHECK-LABEL: @vsle_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsle.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsle_w(v4i32 _1, v4i32 _2) { return __builtin_lsx_vsle_w(_1, _2); }
// CHECK-LABEL: @vsle_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsle.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsle_d(v2i64 _1, v2i64 _2) { return __builtin_lsx_vsle_d(_1, _2); }
// CHECK-LABEL: @vslei_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vslei.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vslei_b(v16i8 _1) { return __builtin_lsx_vslei_b(_1, 1); }
// CHECK-LABEL: @vslei_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vslei.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vslei_h(v8i16 _1) { return __builtin_lsx_vslei_h(_1, 1); }
// CHECK-LABEL: @vslei_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vslei.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vslei_w(v4i32 _1) { return __builtin_lsx_vslei_w(_1, 1); }
// CHECK-LABEL: @vslei_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vslei.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vslei_d(v2i64 _1) { return __builtin_lsx_vslei_d(_1, 1); }
// CHECK-LABEL: @vsle_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsle.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsle_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vsle_bu(_1, _2);
}
// CHECK-LABEL: @vsle_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsle.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsle_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vsle_hu(_1, _2);
}
// CHECK-LABEL: @vsle_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsle.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsle_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vsle_wu(_1, _2);
}
// CHECK-LABEL: @vsle_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsle.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsle_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vsle_du(_1, _2);
}
// CHECK-LABEL: @vslei_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vslei.bu(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vslei_bu(v16u8 _1) { return __builtin_lsx_vslei_bu(_1, 1); }
// CHECK-LABEL: @vslei_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vslei.hu(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vslei_hu(v8u16 _1) { return __builtin_lsx_vslei_hu(_1, 1); }
// CHECK-LABEL: @vslei_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vslei.wu(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vslei_wu(v4u32 _1) { return __builtin_lsx_vslei_wu(_1, 1); }
// CHECK-LABEL: @vslei_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vslei.du(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vslei_du(v2u64 _1) { return __builtin_lsx_vslei_du(_1, 1); }
// CHECK-LABEL: @vsat_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsat.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsat_b(v16i8 _1) { return __builtin_lsx_vsat_b(_1, 1); }
// CHECK-LABEL: @vsat_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsat.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsat_h(v8i16 _1) { return __builtin_lsx_vsat_h(_1, 1); }
// CHECK-LABEL: @vsat_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsat.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsat_w(v4i32 _1) { return __builtin_lsx_vsat_w(_1, 1); }
// CHECK-LABEL: @vsat_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsat.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsat_d(v2i64 _1) { return __builtin_lsx_vsat_d(_1, 1); }
// CHECK-LABEL: @vsat_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsat.bu(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vsat_bu(v16u8 _1) { return __builtin_lsx_vsat_bu(_1, 1); }
// CHECK-LABEL: @vsat_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsat.hu(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vsat_hu(v8u16 _1) { return __builtin_lsx_vsat_hu(_1, 1); }
// CHECK-LABEL: @vsat_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsat.wu(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vsat_wu(v4u32 _1) { return __builtin_lsx_vsat_wu(_1, 1); }
// CHECK-LABEL: @vsat_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsat.du(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vsat_du(v2u64 _1) { return __builtin_lsx_vsat_du(_1, 1); }
// CHECK-LABEL: @vadda_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vadda.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vadda_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vadda_b(_1, _2);
}
// CHECK-LABEL: @vadda_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vadda.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vadda_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vadda_h(_1, _2);
}
// CHECK-LABEL: @vadda_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vadda.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vadda_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vadda_w(_1, _2);
}
// CHECK-LABEL: @vadda_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vadda.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vadda_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vadda_d(_1, _2);
}
// CHECK-LABEL: @vsadd_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsadd.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsadd_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vsadd_b(_1, _2);
}
// CHECK-LABEL: @vsadd_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsadd.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsadd_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vsadd_h(_1, _2);
}
// CHECK-LABEL: @vsadd_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsadd.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsadd_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vsadd_w(_1, _2);
}
// CHECK-LABEL: @vsadd_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsadd.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsadd_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vsadd_d(_1, _2);
}
// CHECK-LABEL: @vsadd_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsadd.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vsadd_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vsadd_bu(_1, _2);
}
// CHECK-LABEL: @vsadd_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsadd.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vsadd_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vsadd_hu(_1, _2);
}
// CHECK-LABEL: @vsadd_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsadd.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vsadd_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vsadd_wu(_1, _2);
}
// CHECK-LABEL: @vsadd_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsadd.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vsadd_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vsadd_du(_1, _2);
}
// CHECK-LABEL: @vavg_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vavg.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vavg_b(v16i8 _1, v16i8 _2) { return __builtin_lsx_vavg_b(_1, _2); }
// CHECK-LABEL: @vavg_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vavg.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vavg_h(v8i16 _1, v8i16 _2) { return __builtin_lsx_vavg_h(_1, _2); }
// CHECK-LABEL: @vavg_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vavg.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vavg_w(v4i32 _1, v4i32 _2) { return __builtin_lsx_vavg_w(_1, _2); }
// CHECK-LABEL: @vavg_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vavg.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vavg_d(v2i64 _1, v2i64 _2) { return __builtin_lsx_vavg_d(_1, _2); }
// CHECK-LABEL: @vavg_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vavg.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vavg_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vavg_bu(_1, _2);
}
// CHECK-LABEL: @vavg_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vavg.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vavg_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vavg_hu(_1, _2);
}
// CHECK-LABEL: @vavg_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vavg.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vavg_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vavg_wu(_1, _2);
}
// CHECK-LABEL: @vavg_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vavg.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vavg_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vavg_du(_1, _2);
}
// CHECK-LABEL: @vavgr_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vavgr.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vavgr_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vavgr_b(_1, _2);
}
// CHECK-LABEL: @vavgr_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vavgr.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vavgr_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vavgr_h(_1, _2);
}
// CHECK-LABEL: @vavgr_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vavgr.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vavgr_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vavgr_w(_1, _2);
}
// CHECK-LABEL: @vavgr_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vavgr.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vavgr_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vavgr_d(_1, _2);
}
// CHECK-LABEL: @vavgr_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vavgr.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vavgr_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vavgr_bu(_1, _2);
}
// CHECK-LABEL: @vavgr_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vavgr.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vavgr_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vavgr_hu(_1, _2);
}
// CHECK-LABEL: @vavgr_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vavgr.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vavgr_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vavgr_wu(_1, _2);
}
// CHECK-LABEL: @vavgr_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vavgr.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vavgr_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vavgr_du(_1, _2);
}
// CHECK-LABEL: @vssub_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssub.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vssub_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vssub_b(_1, _2);
}
// CHECK-LABEL: @vssub_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssub.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vssub_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vssub_h(_1, _2);
}
// CHECK-LABEL: @vssub_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssub.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vssub_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vssub_w(_1, _2);
}
// CHECK-LABEL: @vssub_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssub.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vssub_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vssub_d(_1, _2);
}
// CHECK-LABEL: @vssub_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssub.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vssub_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vssub_bu(_1, _2);
}
// CHECK-LABEL: @vssub_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssub.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vssub_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vssub_hu(_1, _2);
}
// CHECK-LABEL: @vssub_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssub.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vssub_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vssub_wu(_1, _2);
}
// CHECK-LABEL: @vssub_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssub.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vssub_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vssub_du(_1, _2);
}
// CHECK-LABEL: @vabsd_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vabsd.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vabsd_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vabsd_b(_1, _2);
}
// CHECK-LABEL: @vabsd_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vabsd.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vabsd_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vabsd_h(_1, _2);
}
// CHECK-LABEL: @vabsd_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vabsd.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vabsd_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vabsd_w(_1, _2);
}
// CHECK-LABEL: @vabsd_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vabsd.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vabsd_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vabsd_d(_1, _2);
}
// CHECK-LABEL: @vabsd_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vabsd.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vabsd_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vabsd_bu(_1, _2);
}
// CHECK-LABEL: @vabsd_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vabsd.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vabsd_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vabsd_hu(_1, _2);
}
// CHECK-LABEL: @vabsd_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vabsd.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vabsd_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vabsd_wu(_1, _2);
}
// CHECK-LABEL: @vabsd_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vabsd.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vabsd_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vabsd_du(_1, _2);
}
// CHECK-LABEL: @vmul_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmul.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vmul_b(v16i8 _1, v16i8 _2) { return __builtin_lsx_vmul_b(_1, _2); }
// CHECK-LABEL: @vmul_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmul.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmul_h(v8i16 _1, v8i16 _2) { return __builtin_lsx_vmul_h(_1, _2); }
// CHECK-LABEL: @vmul_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmul.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmul_w(v4i32 _1, v4i32 _2) { return __builtin_lsx_vmul_w(_1, _2); }
// CHECK-LABEL: @vmul_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmul.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmul_d(v2i64 _1, v2i64 _2) { return __builtin_lsx_vmul_d(_1, _2); }
// CHECK-LABEL: @vmadd_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmadd.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], <16 x i8> [[_3:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vmadd_b(v16i8 _1, v16i8 _2, v16i8 _3) {
  return __builtin_lsx_vmadd_b(_1, _2, _3);
}
// CHECK-LABEL: @vmadd_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmadd.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], <8 x i16> [[_3:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmadd_h(v8i16 _1, v8i16 _2, v8i16 _3) {
  return __builtin_lsx_vmadd_h(_1, _2, _3);
}
// CHECK-LABEL: @vmadd_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmadd.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], <4 x i32> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmadd_w(v4i32 _1, v4i32 _2, v4i32 _3) {
  return __builtin_lsx_vmadd_w(_1, _2, _3);
}
// CHECK-LABEL: @vmadd_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmadd.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], <2 x i64> [[_3:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmadd_d(v2i64 _1, v2i64 _2, v2i64 _3) {
  return __builtin_lsx_vmadd_d(_1, _2, _3);
}
// CHECK-LABEL: @vmsub_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmsub.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], <16 x i8> [[_3:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vmsub_b(v16i8 _1, v16i8 _2, v16i8 _3) {
  return __builtin_lsx_vmsub_b(_1, _2, _3);
}
// CHECK-LABEL: @vmsub_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmsub.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], <8 x i16> [[_3:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmsub_h(v8i16 _1, v8i16 _2, v8i16 _3) {
  return __builtin_lsx_vmsub_h(_1, _2, _3);
}
// CHECK-LABEL: @vmsub_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmsub.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], <4 x i32> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmsub_w(v4i32 _1, v4i32 _2, v4i32 _3) {
  return __builtin_lsx_vmsub_w(_1, _2, _3);
}
// CHECK-LABEL: @vmsub_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmsub.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], <2 x i64> [[_3:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmsub_d(v2i64 _1, v2i64 _2, v2i64 _3) {
  return __builtin_lsx_vmsub_d(_1, _2, _3);
}
// CHECK-LABEL: @vdiv_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vdiv.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vdiv_b(v16i8 _1, v16i8 _2) { return __builtin_lsx_vdiv_b(_1, _2); }
// CHECK-LABEL: @vdiv_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vdiv.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vdiv_h(v8i16 _1, v8i16 _2) { return __builtin_lsx_vdiv_h(_1, _2); }
// CHECK-LABEL: @vdiv_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vdiv.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vdiv_w(v4i32 _1, v4i32 _2) { return __builtin_lsx_vdiv_w(_1, _2); }
// CHECK-LABEL: @vdiv_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vdiv.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vdiv_d(v2i64 _1, v2i64 _2) { return __builtin_lsx_vdiv_d(_1, _2); }
// CHECK-LABEL: @vdiv_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vdiv.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vdiv_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vdiv_bu(_1, _2);
}
// CHECK-LABEL: @vdiv_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vdiv.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vdiv_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vdiv_hu(_1, _2);
}
// CHECK-LABEL: @vdiv_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vdiv.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vdiv_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vdiv_wu(_1, _2);
}
// CHECK-LABEL: @vdiv_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vdiv.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vdiv_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vdiv_du(_1, _2);
}
// CHECK-LABEL: @vhaddw_h_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vhaddw.h.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vhaddw_h_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vhaddw_h_b(_1, _2);
}
// CHECK-LABEL: @vhaddw_w_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vhaddw.w.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vhaddw_w_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vhaddw_w_h(_1, _2);
}
// CHECK-LABEL: @vhaddw_d_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhaddw.d.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vhaddw_d_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vhaddw_d_w(_1, _2);
}
// CHECK-LABEL: @vhaddw_hu_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vhaddw.hu.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vhaddw_hu_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vhaddw_hu_bu(_1, _2);
}
// CHECK-LABEL: @vhaddw_wu_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vhaddw.wu.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vhaddw_wu_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vhaddw_wu_hu(_1, _2);
}
// CHECK-LABEL: @vhaddw_du_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhaddw.du.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vhaddw_du_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vhaddw_du_wu(_1, _2);
}
// CHECK-LABEL: @vhsubw_h_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vhsubw.h.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vhsubw_h_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vhsubw_h_b(_1, _2);
}
// CHECK-LABEL: @vhsubw_w_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vhsubw.w.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vhsubw_w_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vhsubw_w_h(_1, _2);
}
// CHECK-LABEL: @vhsubw_d_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhsubw.d.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vhsubw_d_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vhsubw_d_w(_1, _2);
}
// CHECK-LABEL: @vhsubw_hu_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vhsubw.hu.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vhsubw_hu_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vhsubw_hu_bu(_1, _2);
}
// CHECK-LABEL: @vhsubw_wu_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vhsubw.wu.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vhsubw_wu_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vhsubw_wu_hu(_1, _2);
}
// CHECK-LABEL: @vhsubw_du_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhsubw.du.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vhsubw_du_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vhsubw_du_wu(_1, _2);
}
// CHECK-LABEL: @vmod_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmod.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vmod_b(v16i8 _1, v16i8 _2) { return __builtin_lsx_vmod_b(_1, _2); }
// CHECK-LABEL: @vmod_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmod.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmod_h(v8i16 _1, v8i16 _2) { return __builtin_lsx_vmod_h(_1, _2); }
// CHECK-LABEL: @vmod_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmod.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmod_w(v4i32 _1, v4i32 _2) { return __builtin_lsx_vmod_w(_1, _2); }
// CHECK-LABEL: @vmod_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmod.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmod_d(v2i64 _1, v2i64 _2) { return __builtin_lsx_vmod_d(_1, _2); }
// CHECK-LABEL: @vmod_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmod.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vmod_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vmod_bu(_1, _2);
}
// CHECK-LABEL: @vmod_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmod.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vmod_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vmod_hu(_1, _2);
}
// CHECK-LABEL: @vmod_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmod.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vmod_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vmod_wu(_1, _2);
}
// CHECK-LABEL: @vmod_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmod.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vmod_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vmod_du(_1, _2);
}
// CHECK-LABEL: @vreplve_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vreplve.b(<16 x i8> [[_1:%.*]], i32 [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vreplve_b(v16i8 _1, int _2) {
  return __builtin_lsx_vreplve_b(_1, _2);
}
// CHECK-LABEL: @vreplve_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vreplve.h(<8 x i16> [[_1:%.*]], i32 [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vreplve_h(v8i16 _1, int _2) {
  return __builtin_lsx_vreplve_h(_1, _2);
}
// CHECK-LABEL: @vreplve_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vreplve.w(<4 x i32> [[_1:%.*]], i32 [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vreplve_w(v4i32 _1, int _2) {
  return __builtin_lsx_vreplve_w(_1, _2);
}
// CHECK-LABEL: @vreplve_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vreplve.d(<2 x i64> [[_1:%.*]], i32 [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vreplve_d(v2i64 _1, int _2) {
  return __builtin_lsx_vreplve_d(_1, _2);
}
// CHECK-LABEL: @vreplvei_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vreplvei.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vreplvei_b(v16i8 _1) { return __builtin_lsx_vreplvei_b(_1, 1); }
// CHECK-LABEL: @vreplvei_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vreplvei.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vreplvei_h(v8i16 _1) { return __builtin_lsx_vreplvei_h(_1, 1); }
// CHECK-LABEL: @vreplvei_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vreplvei.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vreplvei_w(v4i32 _1) { return __builtin_lsx_vreplvei_w(_1, 1); }
// CHECK-LABEL: @vreplvei_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vreplvei.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vreplvei_d(v2i64 _1) { return __builtin_lsx_vreplvei_d(_1, 1); }
// CHECK-LABEL: @vpickev_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vpickev.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vpickev_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vpickev_b(_1, _2);
}
// CHECK-LABEL: @vpickev_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vpickev.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vpickev_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vpickev_h(_1, _2);
}
// CHECK-LABEL: @vpickev_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vpickev.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vpickev_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vpickev_w(_1, _2);
}
// CHECK-LABEL: @vpickev_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vpickev.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vpickev_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vpickev_d(_1, _2);
}
// CHECK-LABEL: @vpickod_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vpickod.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vpickod_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vpickod_b(_1, _2);
}
// CHECK-LABEL: @vpickod_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vpickod.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vpickod_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vpickod_h(_1, _2);
}
// CHECK-LABEL: @vpickod_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vpickod.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vpickod_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vpickod_w(_1, _2);
}
// CHECK-LABEL: @vpickod_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vpickod.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vpickod_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vpickod_d(_1, _2);
}
// CHECK-LABEL: @vilvh_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vilvh.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vilvh_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vilvh_b(_1, _2);
}
// CHECK-LABEL: @vilvh_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vilvh.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vilvh_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vilvh_h(_1, _2);
}
// CHECK-LABEL: @vilvh_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vilvh.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vilvh_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vilvh_w(_1, _2);
}
// CHECK-LABEL: @vilvh_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vilvh.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vilvh_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vilvh_d(_1, _2);
}
// CHECK-LABEL: @vilvl_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vilvl.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vilvl_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vilvl_b(_1, _2);
}
// CHECK-LABEL: @vilvl_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vilvl.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vilvl_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vilvl_h(_1, _2);
}
// CHECK-LABEL: @vilvl_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vilvl.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vilvl_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vilvl_w(_1, _2);
}
// CHECK-LABEL: @vilvl_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vilvl.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vilvl_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vilvl_d(_1, _2);
}
// CHECK-LABEL: @vpackev_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vpackev.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vpackev_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vpackev_b(_1, _2);
}
// CHECK-LABEL: @vpackev_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vpackev.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vpackev_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vpackev_h(_1, _2);
}
// CHECK-LABEL: @vpackev_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vpackev.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vpackev_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vpackev_w(_1, _2);
}
// CHECK-LABEL: @vpackev_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vpackev.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vpackev_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vpackev_d(_1, _2);
}
// CHECK-LABEL: @vpackod_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vpackod.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vpackod_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vpackod_b(_1, _2);
}
// CHECK-LABEL: @vpackod_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vpackod.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vpackod_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vpackod_h(_1, _2);
}
// CHECK-LABEL: @vpackod_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vpackod.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vpackod_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vpackod_w(_1, _2);
}
// CHECK-LABEL: @vpackod_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vpackod.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vpackod_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vpackod_d(_1, _2);
}
// CHECK-LABEL: @vshuf_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vshuf.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], <8 x i16> [[_3:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vshuf_h(v8i16 _1, v8i16 _2, v8i16 _3) {
  return __builtin_lsx_vshuf_h(_1, _2, _3);
}
// CHECK-LABEL: @vshuf_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vshuf.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], <4 x i32> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vshuf_w(v4i32 _1, v4i32 _2, v4i32 _3) {
  return __builtin_lsx_vshuf_w(_1, _2, _3);
}
// CHECK-LABEL: @vshuf_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vshuf.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], <2 x i64> [[_3:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vshuf_d(v2i64 _1, v2i64 _2, v2i64 _3) {
  return __builtin_lsx_vshuf_d(_1, _2, _3);
}
// CHECK-LABEL: @vand_v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vand.v(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vand_v(v16u8 _1, v16u8 _2) { return __builtin_lsx_vand_v(_1, _2); }
// CHECK-LABEL: @vandi_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vandi.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vandi_b(v16u8 _1) { return __builtin_lsx_vandi_b(_1, 1); }
// CHECK-LABEL: @vor_v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vor.v(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vor_v(v16u8 _1, v16u8 _2) { return __builtin_lsx_vor_v(_1, _2); }
// CHECK-LABEL: @vori_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vori.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vori_b(v16u8 _1) { return __builtin_lsx_vori_b(_1, 1); }
// CHECK-LABEL: @vnor_v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vnor.v(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vnor_v(v16u8 _1, v16u8 _2) { return __builtin_lsx_vnor_v(_1, _2); }
// CHECK-LABEL: @vnori_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vnori.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vnori_b(v16u8 _1) { return __builtin_lsx_vnori_b(_1, 1); }
// CHECK-LABEL: @vxor_v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vxor.v(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vxor_v(v16u8 _1, v16u8 _2) { return __builtin_lsx_vxor_v(_1, _2); }
// CHECK-LABEL: @vxori_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vxori.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vxori_b(v16u8 _1) { return __builtin_lsx_vxori_b(_1, 1); }
// CHECK-LABEL: @vbitsel_v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitsel.v(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], <16 x i8> [[_3:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vbitsel_v(v16u8 _1, v16u8 _2, v16u8 _3) {
  return __builtin_lsx_vbitsel_v(_1, _2, _3);
}
// CHECK-LABEL: @vbitseli_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbitseli.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vbitseli_b(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vbitseli_b(_1, _2, 1);
}
// CHECK-LABEL: @vshuf4i_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vshuf4i.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vshuf4i_b(v16i8 _1) { return __builtin_lsx_vshuf4i_b(_1, 1); }
// CHECK-LABEL: @vshuf4i_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vshuf4i.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vshuf4i_h(v8i16 _1) { return __builtin_lsx_vshuf4i_h(_1, 1); }
// CHECK-LABEL: @vshuf4i_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vshuf4i.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vshuf4i_w(v4i32 _1) { return __builtin_lsx_vshuf4i_w(_1, 1); }
// CHECK-LABEL: @vreplgr2vr_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vreplgr2vr.b(i32 [[_1:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vreplgr2vr_b(int _1) { return __builtin_lsx_vreplgr2vr_b(_1); }
// CHECK-LABEL: @vreplgr2vr_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vreplgr2vr.h(i32 [[_1:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vreplgr2vr_h(int _1) { return __builtin_lsx_vreplgr2vr_h(_1); }
// CHECK-LABEL: @vreplgr2vr_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vreplgr2vr.w(i32 [[_1:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vreplgr2vr_w(int _1) { return __builtin_lsx_vreplgr2vr_w(_1); }
// CHECK-LABEL: @vreplgr2vr_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vreplgr2vr.d(i64 [[_1:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vreplgr2vr_d(long _1) { return __builtin_lsx_vreplgr2vr_d(_1); }
// CHECK-LABEL: @vpcnt_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vpcnt.b(<16 x i8> [[_1:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vpcnt_b(v16i8 _1) { return __builtin_lsx_vpcnt_b(_1); }
// CHECK-LABEL: @vpcnt_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vpcnt.h(<8 x i16> [[_1:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vpcnt_h(v8i16 _1) { return __builtin_lsx_vpcnt_h(_1); }
// CHECK-LABEL: @vpcnt_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vpcnt.w(<4 x i32> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vpcnt_w(v4i32 _1) { return __builtin_lsx_vpcnt_w(_1); }
// CHECK-LABEL: @vpcnt_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vpcnt.d(<2 x i64> [[_1:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vpcnt_d(v2i64 _1) { return __builtin_lsx_vpcnt_d(_1); }
// CHECK-LABEL: @vclo_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vclo.b(<16 x i8> [[_1:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vclo_b(v16i8 _1) { return __builtin_lsx_vclo_b(_1); }
// CHECK-LABEL: @vclo_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vclo.h(<8 x i16> [[_1:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vclo_h(v8i16 _1) { return __builtin_lsx_vclo_h(_1); }
// CHECK-LABEL: @vclo_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vclo.w(<4 x i32> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vclo_w(v4i32 _1) { return __builtin_lsx_vclo_w(_1); }
// CHECK-LABEL: @vclo_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vclo.d(<2 x i64> [[_1:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vclo_d(v2i64 _1) { return __builtin_lsx_vclo_d(_1); }
// CHECK-LABEL: @vclz_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vclz.b(<16 x i8> [[_1:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vclz_b(v16i8 _1) { return __builtin_lsx_vclz_b(_1); }
// CHECK-LABEL: @vclz_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vclz.h(<8 x i16> [[_1:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vclz_h(v8i16 _1) { return __builtin_lsx_vclz_h(_1); }
// CHECK-LABEL: @vclz_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vclz.w(<4 x i32> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vclz_w(v4i32 _1) { return __builtin_lsx_vclz_w(_1); }
// CHECK-LABEL: @vclz_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vclz.d(<2 x i64> [[_1:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vclz_d(v2i64 _1) { return __builtin_lsx_vclz_d(_1); }
// CHECK-LABEL: @vpickve2gr_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.vpickve2gr.b(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret i32 [[TMP0]]
int vpickve2gr_b(v16i8 _1) { return __builtin_lsx_vpickve2gr_b(_1, 1); }
// CHECK-LABEL: @vpickve2gr_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.vpickve2gr.h(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret i32 [[TMP0]]
int vpickve2gr_h(v8i16 _1) { return __builtin_lsx_vpickve2gr_h(_1, 1); }
// CHECK-LABEL: @vpickve2gr_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.vpickve2gr.w(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret i32 [[TMP0]]
int vpickve2gr_w(v4i32 _1) { return __builtin_lsx_vpickve2gr_w(_1, 1); }
// CHECK-LABEL: @vpickve2gr_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.loongarch.lsx.vpickve2gr.d(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret i64 [[TMP0]]
long vpickve2gr_d(v2i64 _1) { return __builtin_lsx_vpickve2gr_d(_1, 1); }
// CHECK-LABEL: @vpickve2gr_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.vpickve2gr.bu(<16 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret i32 [[TMP0]]
unsigned int vpickve2gr_bu(v16i8 _1) {
  return __builtin_lsx_vpickve2gr_bu(_1, 1);
}
// CHECK-LABEL: @vpickve2gr_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.vpickve2gr.hu(<8 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret i32 [[TMP0]]
unsigned int vpickve2gr_hu(v8i16 _1) {
  return __builtin_lsx_vpickve2gr_hu(_1, 1);
}
// CHECK-LABEL: @vpickve2gr_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.vpickve2gr.wu(<4 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret i32 [[TMP0]]
unsigned int vpickve2gr_wu(v4i32 _1) {
  return __builtin_lsx_vpickve2gr_wu(_1, 1);
}
// CHECK-LABEL: @vpickve2gr_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.loongarch.lsx.vpickve2gr.du(<2 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret i64 [[TMP0]]
unsigned long int vpickve2gr_du(v2i64 _1) {
  return __builtin_lsx_vpickve2gr_du(_1, 1);
}
// CHECK-LABEL: @vinsgr2vr_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vinsgr2vr.b(<16 x i8> [[_1:%.*]], i32 1, i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vinsgr2vr_b(v16i8 _1) {
  return __builtin_lsx_vinsgr2vr_b(_1, 1, 1);
}
// CHECK-LABEL: @vinsgr2vr_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vinsgr2vr.h(<8 x i16> [[_1:%.*]], i32 1, i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vinsgr2vr_h(v8i16 _1) {
  return __builtin_lsx_vinsgr2vr_h(_1, 1, 1);
}
// CHECK-LABEL: @vinsgr2vr_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vinsgr2vr.w(<4 x i32> [[_1:%.*]], i32 1, i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vinsgr2vr_w(v4i32 _1) {
  return __builtin_lsx_vinsgr2vr_w(_1, 1, 1);
}
// CHECK-LABEL: @vinsgr2vr_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vinsgr2vr.d(<2 x i64> [[_1:%.*]], i64 1, i32 1)
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vinsgr2vr_d(v2i64 _1) {
  return __builtin_lsx_vinsgr2vr_d(_1, 1, 1);
}
// CHECK-LABEL: @vfadd_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfadd.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfadd_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfadd_s(_1, _2);
}
// CHECK-LABEL: @vfadd_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfadd.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfadd_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfadd_d(_1, _2);
}
// CHECK-LABEL: @vfsub_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfsub.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfsub_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfsub_s(_1, _2);
}
// CHECK-LABEL: @vfsub_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfsub.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfsub_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfsub_d(_1, _2);
}
// CHECK-LABEL: @vfmul_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfmul.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfmul_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfmul_s(_1, _2);
}
// CHECK-LABEL: @vfmul_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfmul.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfmul_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfmul_d(_1, _2);
}
// CHECK-LABEL: @vfdiv_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfdiv.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfdiv_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfdiv_s(_1, _2);
}
// CHECK-LABEL: @vfdiv_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfdiv.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfdiv_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfdiv_d(_1, _2);
}
// CHECK-LABEL: @vfcvt_h_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vfcvt.h.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vfcvt_h_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcvt_h_s(_1, _2);
}
// CHECK-LABEL: @vfcvt_s_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfcvt.s.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfcvt_s_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcvt_s_d(_1, _2);
}
// CHECK-LABEL: @vfmin_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfmin.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfmin_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfmin_s(_1, _2);
}
// CHECK-LABEL: @vfmin_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfmin.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfmin_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfmin_d(_1, _2);
}
// CHECK-LABEL: @vfmina_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfmina.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfmina_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfmina_s(_1, _2);
}
// CHECK-LABEL: @vfmina_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfmina.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfmina_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfmina_d(_1, _2);
}
// CHECK-LABEL: @vfmax_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfmax.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfmax_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfmax_s(_1, _2);
}
// CHECK-LABEL: @vfmax_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfmax.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfmax_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfmax_d(_1, _2);
}
// CHECK-LABEL: @vfmaxa_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfmaxa.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfmaxa_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfmaxa_s(_1, _2);
}
// CHECK-LABEL: @vfmaxa_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfmaxa.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfmaxa_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfmaxa_d(_1, _2);
}
// CHECK-LABEL: @vfclass_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfclass.s(<4 x float> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfclass_s(v4f32 _1) { return __builtin_lsx_vfclass_s(_1); }
// CHECK-LABEL: @vfclass_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfclass.d(<2 x double> [[_1:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfclass_d(v2f64 _1) { return __builtin_lsx_vfclass_d(_1); }
// CHECK-LABEL: @vfsqrt_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfsqrt.s(<4 x float> [[_1:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfsqrt_s(v4f32 _1) { return __builtin_lsx_vfsqrt_s(_1); }
// CHECK-LABEL: @vfsqrt_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfsqrt.d(<2 x double> [[_1:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfsqrt_d(v2f64 _1) { return __builtin_lsx_vfsqrt_d(_1); }
// CHECK-LABEL: @vfrecip_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfrecip.s(<4 x float> [[_1:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfrecip_s(v4f32 _1) { return __builtin_lsx_vfrecip_s(_1); }
// CHECK-LABEL: @vfrecip_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfrecip.d(<2 x double> [[_1:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfrecip_d(v2f64 _1) { return __builtin_lsx_vfrecip_d(_1); }
// CHECK-LABEL: @vfrint_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfrint.s(<4 x float> [[_1:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfrint_s(v4f32 _1) { return __builtin_lsx_vfrint_s(_1); }
// CHECK-LABEL: @vfrint_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfrint.d(<2 x double> [[_1:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfrint_d(v2f64 _1) { return __builtin_lsx_vfrint_d(_1); }
// CHECK-LABEL: @vfrsqrt_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfrsqrt.s(<4 x float> [[_1:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfrsqrt_s(v4f32 _1) { return __builtin_lsx_vfrsqrt_s(_1); }
// CHECK-LABEL: @vfrsqrt_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfrsqrt.d(<2 x double> [[_1:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfrsqrt_d(v2f64 _1) { return __builtin_lsx_vfrsqrt_d(_1); }
// CHECK-LABEL: @vflogb_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vflogb.s(<4 x float> [[_1:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vflogb_s(v4f32 _1) { return __builtin_lsx_vflogb_s(_1); }
// CHECK-LABEL: @vflogb_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vflogb.d(<2 x double> [[_1:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vflogb_d(v2f64 _1) { return __builtin_lsx_vflogb_d(_1); }
// CHECK-LABEL: @vfcvth_s_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfcvth.s.h(<8 x i16> [[_1:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfcvth_s_h(v8i16 _1) { return __builtin_lsx_vfcvth_s_h(_1); }
// CHECK-LABEL: @vfcvth_d_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfcvth.d.s(<4 x float> [[_1:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfcvth_d_s(v4f32 _1) { return __builtin_lsx_vfcvth_d_s(_1); }
// CHECK-LABEL: @vfcvtl_s_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfcvtl.s.h(<8 x i16> [[_1:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfcvtl_s_h(v8i16 _1) { return __builtin_lsx_vfcvtl_s_h(_1); }
// CHECK-LABEL: @vfcvtl_d_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfcvtl.d.s(<4 x float> [[_1:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfcvtl_d_s(v4f32 _1) { return __builtin_lsx_vfcvtl_d_s(_1); }
// CHECK-LABEL: @vftint_w_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftint.w.s(<4 x float> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vftint_w_s(v4f32 _1) { return __builtin_lsx_vftint_w_s(_1); }
// CHECK-LABEL: @vftint_l_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftint.l.d(<2 x double> [[_1:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vftint_l_d(v2f64 _1) { return __builtin_lsx_vftint_l_d(_1); }
// CHECK-LABEL: @vftint_wu_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftint.wu.s(<4 x float> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vftint_wu_s(v4f32 _1) { return __builtin_lsx_vftint_wu_s(_1); }
// CHECK-LABEL: @vftint_lu_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftint.lu.d(<2 x double> [[_1:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vftint_lu_d(v2f64 _1) { return __builtin_lsx_vftint_lu_d(_1); }
// CHECK-LABEL: @vftintrz_w_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrz.w.s(<4 x float> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vftintrz_w_s(v4f32 _1) { return __builtin_lsx_vftintrz_w_s(_1); }
// CHECK-LABEL: @vftintrz_l_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrz.l.d(<2 x double> [[_1:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vftintrz_l_d(v2f64 _1) { return __builtin_lsx_vftintrz_l_d(_1); }
// CHECK-LABEL: @vftintrz_wu_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrz.wu.s(<4 x float> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vftintrz_wu_s(v4f32 _1) { return __builtin_lsx_vftintrz_wu_s(_1); }
// CHECK-LABEL: @vftintrz_lu_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrz.lu.d(<2 x double> [[_1:%.*]])
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vftintrz_lu_d(v2f64 _1) { return __builtin_lsx_vftintrz_lu_d(_1); }
// CHECK-LABEL: @vffint_s_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vffint.s.w(<4 x i32> [[_1:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vffint_s_w(v4i32 _1) { return __builtin_lsx_vffint_s_w(_1); }
// CHECK-LABEL: @vffint_d_l(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vffint.d.l(<2 x i64> [[_1:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vffint_d_l(v2i64 _1) { return __builtin_lsx_vffint_d_l(_1); }
// CHECK-LABEL: @vffint_s_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vffint.s.wu(<4 x i32> [[_1:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vffint_s_wu(v4u32 _1) { return __builtin_lsx_vffint_s_wu(_1); }
// CHECK-LABEL: @vffint_d_lu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vffint.d.lu(<2 x i64> [[_1:%.*]])
// CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vffint_d_lu(v2u64 _1) { return __builtin_lsx_vffint_d_lu(_1); }
2552 // CHECK-LABEL: @vandn_v(
2553 // CHECK-NEXT: entry:
2554 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vandn.v(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
2555 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
2557 v16u8
vandn_v(v16u8 _1
, v16u8 _2
) {
2558 return __builtin_lsx_vandn_v(_1
, _2
);
2560 // CHECK-LABEL: @vneg_b(
2561 // CHECK-NEXT: entry:
2562 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vneg.b(<16 x i8> [[_1:%.*]])
2563 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
2565 v16i8
vneg_b(v16i8 _1
) { return __builtin_lsx_vneg_b(_1
); }
2566 // CHECK-LABEL: @vneg_h(
2567 // CHECK-NEXT: entry:
2568 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vneg.h(<8 x i16> [[_1:%.*]])
2569 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vneg_h(v8i16 _1) { return __builtin_lsx_vneg_h(_1); }
2572 // CHECK-LABEL: @vneg_w(
2573 // CHECK-NEXT: entry:
2574 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vneg.w(<4 x i32> [[_1:%.*]])
2575 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vneg_w(v4i32 _1) { return __builtin_lsx_vneg_w(_1); }
2578 // CHECK-LABEL: @vneg_d(
2579 // CHECK-NEXT: entry:
2580 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vneg.d(<2 x i64> [[_1:%.*]])
2581 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vneg_d(v2i64 _1) { return __builtin_lsx_vneg_d(_1); }
2584 // CHECK-LABEL: @vmuh_b(
2585 // CHECK-NEXT: entry:
2586 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmuh.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
2587 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vmuh_b(v16i8 _1, v16i8 _2) { return __builtin_lsx_vmuh_b(_1, _2); }
2590 // CHECK-LABEL: @vmuh_h(
2591 // CHECK-NEXT: entry:
2592 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmuh.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
2593 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmuh_h(v8i16 _1, v8i16 _2) { return __builtin_lsx_vmuh_h(_1, _2); }
2596 // CHECK-LABEL: @vmuh_w(
2597 // CHECK-NEXT: entry:
2598 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmuh.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
2599 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmuh_w(v4i32 _1, v4i32 _2) { return __builtin_lsx_vmuh_w(_1, _2); }
2602 // CHECK-LABEL: @vmuh_d(
2603 // CHECK-NEXT: entry:
2604 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmuh.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
2605 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmuh_d(v2i64 _1, v2i64 _2) { return __builtin_lsx_vmuh_d(_1, _2); }
2608 // CHECK-LABEL: @vmuh_bu(
2609 // CHECK-NEXT: entry:
2610 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmuh.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
2611 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vmuh_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vmuh_bu(_1, _2);
}
2616 // CHECK-LABEL: @vmuh_hu(
2617 // CHECK-NEXT: entry:
2618 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmuh.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
2619 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vmuh_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vmuh_hu(_1, _2);
}
2624 // CHECK-LABEL: @vmuh_wu(
2625 // CHECK-NEXT: entry:
2626 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmuh.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
2627 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vmuh_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vmuh_wu(_1, _2);
}
2632 // CHECK-LABEL: @vmuh_du(
2633 // CHECK-NEXT: entry:
2634 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmuh.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
2635 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vmuh_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vmuh_du(_1, _2);
}
2640 // CHECK-LABEL: @vsllwil_h_b(
2641 // CHECK-NEXT: entry:
2642 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsllwil.h.b(<16 x i8> [[_1:%.*]], i32 1)
2643 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsllwil_h_b(v16i8 _1) { return __builtin_lsx_vsllwil_h_b(_1, 1); }
2646 // CHECK-LABEL: @vsllwil_w_h(
2647 // CHECK-NEXT: entry:
2648 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsllwil.w.h(<8 x i16> [[_1:%.*]], i32 1)
2649 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsllwil_w_h(v8i16 _1) { return __builtin_lsx_vsllwil_w_h(_1, 1); }
2652 // CHECK-LABEL: @vsllwil_d_w(
2653 // CHECK-NEXT: entry:
2654 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsllwil.d.w(<4 x i32> [[_1:%.*]], i32 1)
2655 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsllwil_d_w(v4i32 _1) { return __builtin_lsx_vsllwil_d_w(_1, 1); }
2658 // CHECK-LABEL: @vsllwil_hu_bu(
2659 // CHECK-NEXT: entry:
2660 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsllwil.hu.bu(<16 x i8> [[_1:%.*]], i32 1)
2661 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vsllwil_hu_bu(v16u8 _1) {
  return __builtin_lsx_vsllwil_hu_bu(_1, 1);
}
2666 // CHECK-LABEL: @vsllwil_wu_hu(
2667 // CHECK-NEXT: entry:
2668 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsllwil.wu.hu(<8 x i16> [[_1:%.*]], i32 1)
2669 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vsllwil_wu_hu(v8u16 _1) {
  return __builtin_lsx_vsllwil_wu_hu(_1, 1);
}
2674 // CHECK-LABEL: @vsllwil_du_wu(
2675 // CHECK-NEXT: entry:
2676 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsllwil.du.wu(<4 x i32> [[_1:%.*]], i32 1)
2677 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vsllwil_du_wu(v4u32 _1) {
  return __builtin_lsx_vsllwil_du_wu(_1, 1);
}
2682 // CHECK-LABEL: @vsran_b_h(
2683 // CHECK-NEXT: entry:
2684 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsran.b.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
2685 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsran_b_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vsran_b_h(_1, _2);
}
2690 // CHECK-LABEL: @vsran_h_w(
2691 // CHECK-NEXT: entry:
2692 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsran.h.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
2693 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsran_h_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vsran_h_w(_1, _2);
}
2698 // CHECK-LABEL: @vsran_w_d(
2699 // CHECK-NEXT: entry:
2700 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsran.w.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
2701 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsran_w_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vsran_w_d(_1, _2);
}
2706 // CHECK-LABEL: @vssran_b_h(
2707 // CHECK-NEXT: entry:
2708 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssran.b.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
2709 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vssran_b_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vssran_b_h(_1, _2);
}
2714 // CHECK-LABEL: @vssran_h_w(
2715 // CHECK-NEXT: entry:
2716 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssran.h.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
2717 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vssran_h_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vssran_h_w(_1, _2);
}
2722 // CHECK-LABEL: @vssran_w_d(
2723 // CHECK-NEXT: entry:
2724 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssran.w.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
2725 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vssran_w_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vssran_w_d(_1, _2);
}
2730 // CHECK-LABEL: @vssran_bu_h(
2731 // CHECK-NEXT: entry:
2732 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssran.bu.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
2733 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vssran_bu_h(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vssran_bu_h(_1, _2);
}
2738 // CHECK-LABEL: @vssran_hu_w(
2739 // CHECK-NEXT: entry:
2740 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssran.hu.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
2741 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vssran_hu_w(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vssran_hu_w(_1, _2);
}
2746 // CHECK-LABEL: @vssran_wu_d(
2747 // CHECK-NEXT: entry:
2748 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssran.wu.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
2749 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vssran_wu_d(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vssran_wu_d(_1, _2);
}
2754 // CHECK-LABEL: @vsrarn_b_h(
2755 // CHECK-NEXT: entry:
2756 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrarn.b.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
2757 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsrarn_b_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vsrarn_b_h(_1, _2);
}
2762 // CHECK-LABEL: @vsrarn_h_w(
2763 // CHECK-NEXT: entry:
2764 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrarn.h.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
2765 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsrarn_h_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vsrarn_h_w(_1, _2);
}
2770 // CHECK-LABEL: @vsrarn_w_d(
2771 // CHECK-NEXT: entry:
2772 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrarn.w.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
2773 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsrarn_w_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vsrarn_w_d(_1, _2);
}
2778 // CHECK-LABEL: @vssrarn_b_h(
2779 // CHECK-NEXT: entry:
2780 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrarn.b.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
2781 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vssrarn_b_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vssrarn_b_h(_1, _2);
}
2786 // CHECK-LABEL: @vssrarn_h_w(
2787 // CHECK-NEXT: entry:
2788 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrarn.h.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
2789 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vssrarn_h_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vssrarn_h_w(_1, _2);
}
2794 // CHECK-LABEL: @vssrarn_w_d(
2795 // CHECK-NEXT: entry:
2796 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrarn.w.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
2797 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vssrarn_w_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vssrarn_w_d(_1, _2);
}
2802 // CHECK-LABEL: @vssrarn_bu_h(
2803 // CHECK-NEXT: entry:
2804 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrarn.bu.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
2805 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vssrarn_bu_h(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vssrarn_bu_h(_1, _2);
}
2810 // CHECK-LABEL: @vssrarn_hu_w(
2811 // CHECK-NEXT: entry:
2812 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrarn.hu.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
2813 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vssrarn_hu_w(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vssrarn_hu_w(_1, _2);
}
2818 // CHECK-LABEL: @vssrarn_wu_d(
2819 // CHECK-NEXT: entry:
2820 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrarn.wu.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
2821 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vssrarn_wu_d(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vssrarn_wu_d(_1, _2);
}
2826 // CHECK-LABEL: @vsrln_b_h(
2827 // CHECK-NEXT: entry:
2828 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrln.b.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
2829 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsrln_b_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vsrln_b_h(_1, _2);
}
2834 // CHECK-LABEL: @vsrln_h_w(
2835 // CHECK-NEXT: entry:
2836 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrln.h.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
2837 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsrln_h_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vsrln_h_w(_1, _2);
}
2842 // CHECK-LABEL: @vsrln_w_d(
2843 // CHECK-NEXT: entry:
2844 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrln.w.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
2845 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsrln_w_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vsrln_w_d(_1, _2);
}
2850 // CHECK-LABEL: @vssrln_bu_h(
2851 // CHECK-NEXT: entry:
2852 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrln.bu.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
2853 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vssrln_bu_h(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vssrln_bu_h(_1, _2);
}
2858 // CHECK-LABEL: @vssrln_hu_w(
2859 // CHECK-NEXT: entry:
2860 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrln.hu.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
2861 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vssrln_hu_w(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vssrln_hu_w(_1, _2);
}
2866 // CHECK-LABEL: @vssrln_wu_d(
2867 // CHECK-NEXT: entry:
2868 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrln.wu.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
2869 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vssrln_wu_d(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vssrln_wu_d(_1, _2);
}
2874 // CHECK-LABEL: @vsrlrn_b_h(
2875 // CHECK-NEXT: entry:
2876 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrlrn.b.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
2877 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsrlrn_b_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vsrlrn_b_h(_1, _2);
}
2882 // CHECK-LABEL: @vsrlrn_h_w(
2883 // CHECK-NEXT: entry:
2884 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrlrn.h.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
2885 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsrlrn_h_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vsrlrn_h_w(_1, _2);
}
2890 // CHECK-LABEL: @vsrlrn_w_d(
2891 // CHECK-NEXT: entry:
2892 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrlrn.w.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
2893 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsrlrn_w_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vsrlrn_w_d(_1, _2);
}
2898 // CHECK-LABEL: @vssrlrn_bu_h(
2899 // CHECK-NEXT: entry:
2900 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrlrn.bu.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
2901 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vssrlrn_bu_h(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vssrlrn_bu_h(_1, _2);
}
2906 // CHECK-LABEL: @vssrlrn_hu_w(
2907 // CHECK-NEXT: entry:
2908 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrlrn.hu.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
2909 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vssrlrn_hu_w(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vssrlrn_hu_w(_1, _2);
}
2914 // CHECK-LABEL: @vssrlrn_wu_d(
2915 // CHECK-NEXT: entry:
2916 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrlrn.wu.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
2917 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vssrlrn_wu_d(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vssrlrn_wu_d(_1, _2);
}
2922 // CHECK-LABEL: @vfrstpi_b(
2923 // CHECK-NEXT: entry:
2924 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vfrstpi.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], i32 1)
2925 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vfrstpi_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vfrstpi_b(_1, _2, 1);
}
2930 // CHECK-LABEL: @vfrstpi_h(
2931 // CHECK-NEXT: entry:
2932 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vfrstpi.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], i32 1)
2933 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vfrstpi_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vfrstpi_h(_1, _2, 1);
}
2938 // CHECK-LABEL: @vfrstp_b(
2939 // CHECK-NEXT: entry:
2940 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vfrstp.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], <16 x i8> [[_3:%.*]])
2941 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vfrstp_b(v16i8 _1, v16i8 _2, v16i8 _3) {
  return __builtin_lsx_vfrstp_b(_1, _2, _3);
}
2946 // CHECK-LABEL: @vfrstp_h(
2947 // CHECK-NEXT: entry:
2948 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vfrstp.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], <8 x i16> [[_3:%.*]])
2949 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vfrstp_h(v8i16 _1, v8i16 _2, v8i16 _3) {
  return __builtin_lsx_vfrstp_h(_1, _2, _3);
}
2954 // CHECK-LABEL: @vshuf4i_d(
2955 // CHECK-NEXT: entry:
2956 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vshuf4i.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], i32 1)
2957 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vshuf4i_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vshuf4i_d(_1, _2, 1);
}
2962 // CHECK-LABEL: @vbsrl_v(
2963 // CHECK-NEXT: entry:
2964 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbsrl.v(<16 x i8> [[_1:%.*]], i32 1)
2965 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vbsrl_v(v16i8 _1) { return __builtin_lsx_vbsrl_v(_1, 1); }
2968 // CHECK-LABEL: @vbsll_v(
2969 // CHECK-NEXT: entry:
2970 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vbsll.v(<16 x i8> [[_1:%.*]], i32 1)
2971 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vbsll_v(v16i8 _1) { return __builtin_lsx_vbsll_v(_1, 1); }
2974 // CHECK-LABEL: @vextrins_b(
2975 // CHECK-NEXT: entry:
2976 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vextrins.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], i32 1)
2977 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vextrins_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vextrins_b(_1, _2, 1);
}
2982 // CHECK-LABEL: @vextrins_h(
2983 // CHECK-NEXT: entry:
2984 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vextrins.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], i32 1)
2985 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vextrins_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vextrins_h(_1, _2, 1);
}
2990 // CHECK-LABEL: @vextrins_w(
2991 // CHECK-NEXT: entry:
2992 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vextrins.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], i32 1)
2993 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vextrins_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vextrins_w(_1, _2, 1);
}
2998 // CHECK-LABEL: @vextrins_d(
2999 // CHECK-NEXT: entry:
3000 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vextrins.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], i32 1)
3001 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vextrins_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vextrins_d(_1, _2, 1);
}
3006 // CHECK-LABEL: @vmskltz_b(
3007 // CHECK-NEXT: entry:
3008 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmskltz.b(<16 x i8> [[_1:%.*]])
3009 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vmskltz_b(v16i8 _1) { return __builtin_lsx_vmskltz_b(_1); }
3012 // CHECK-LABEL: @vmskltz_h(
3013 // CHECK-NEXT: entry:
3014 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmskltz.h(<8 x i16> [[_1:%.*]])
3015 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmskltz_h(v8i16 _1) { return __builtin_lsx_vmskltz_h(_1); }
3018 // CHECK-LABEL: @vmskltz_w(
3019 // CHECK-NEXT: entry:
3020 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmskltz.w(<4 x i32> [[_1:%.*]])
3021 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmskltz_w(v4i32 _1) { return __builtin_lsx_vmskltz_w(_1); }
3024 // CHECK-LABEL: @vmskltz_d(
3025 // CHECK-NEXT: entry:
3026 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmskltz.d(<2 x i64> [[_1:%.*]])
3027 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmskltz_d(v2i64 _1) { return __builtin_lsx_vmskltz_d(_1); }
3030 // CHECK-LABEL: @vsigncov_b(
3031 // CHECK-NEXT: entry:
3032 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsigncov.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3033 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsigncov_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vsigncov_b(_1, _2);
}
3038 // CHECK-LABEL: @vsigncov_h(
3039 // CHECK-NEXT: entry:
3040 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsigncov.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3041 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsigncov_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vsigncov_h(_1, _2);
}
3046 // CHECK-LABEL: @vsigncov_w(
3047 // CHECK-NEXT: entry:
3048 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsigncov.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3049 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsigncov_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vsigncov_w(_1, _2);
}
3054 // CHECK-LABEL: @vsigncov_d(
3055 // CHECK-NEXT: entry:
3056 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsigncov.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3057 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsigncov_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vsigncov_d(_1, _2);
}
3062 // CHECK-LABEL: @vfmadd_s(
3063 // CHECK-NEXT: entry:
3064 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfmadd.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]], <4 x float> [[_3:%.*]])
3065 // CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfmadd_s(v4f32 _1, v4f32 _2, v4f32 _3) {
  return __builtin_lsx_vfmadd_s(_1, _2, _3);
}
3070 // CHECK-LABEL: @vfmadd_d(
3071 // CHECK-NEXT: entry:
3072 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfmadd.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]], <2 x double> [[_3:%.*]])
3073 // CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfmadd_d(v2f64 _1, v2f64 _2, v2f64 _3) {
  return __builtin_lsx_vfmadd_d(_1, _2, _3);
}
3078 // CHECK-LABEL: @vfmsub_s(
3079 // CHECK-NEXT: entry:
3080 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfmsub.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]], <4 x float> [[_3:%.*]])
3081 // CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfmsub_s(v4f32 _1, v4f32 _2, v4f32 _3) {
  return __builtin_lsx_vfmsub_s(_1, _2, _3);
}
3086 // CHECK-LABEL: @vfmsub_d(
3087 // CHECK-NEXT: entry:
3088 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfmsub.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]], <2 x double> [[_3:%.*]])
3089 // CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfmsub_d(v2f64 _1, v2f64 _2, v2f64 _3) {
  return __builtin_lsx_vfmsub_d(_1, _2, _3);
}
3094 // CHECK-LABEL: @vfnmadd_s(
3095 // CHECK-NEXT: entry:
3096 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfnmadd.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]], <4 x float> [[_3:%.*]])
3097 // CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfnmadd_s(v4f32 _1, v4f32 _2, v4f32 _3) {
  return __builtin_lsx_vfnmadd_s(_1, _2, _3);
}
3102 // CHECK-LABEL: @vfnmadd_d(
3103 // CHECK-NEXT: entry:
3104 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfnmadd.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]], <2 x double> [[_3:%.*]])
3105 // CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfnmadd_d(v2f64 _1, v2f64 _2, v2f64 _3) {
  return __builtin_lsx_vfnmadd_d(_1, _2, _3);
}
3110 // CHECK-LABEL: @vfnmsub_s(
3111 // CHECK-NEXT: entry:
3112 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfnmsub.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]], <4 x float> [[_3:%.*]])
3113 // CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vfnmsub_s(v4f32 _1, v4f32 _2, v4f32 _3) {
  return __builtin_lsx_vfnmsub_s(_1, _2, _3);
}
3118 // CHECK-LABEL: @vfnmsub_d(
3119 // CHECK-NEXT: entry:
3120 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfnmsub.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]], <2 x double> [[_3:%.*]])
3121 // CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vfnmsub_d(v2f64 _1, v2f64 _2, v2f64 _3) {
  return __builtin_lsx_vfnmsub_d(_1, _2, _3);
}
3126 // CHECK-LABEL: @vftintrne_w_s(
3127 // CHECK-NEXT: entry:
3128 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrne.w.s(<4 x float> [[_1:%.*]])
3129 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vftintrne_w_s(v4f32 _1) { return __builtin_lsx_vftintrne_w_s(_1); }
3132 // CHECK-LABEL: @vftintrne_l_d(
3133 // CHECK-NEXT: entry:
3134 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrne.l.d(<2 x double> [[_1:%.*]])
3135 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vftintrne_l_d(v2f64 _1) { return __builtin_lsx_vftintrne_l_d(_1); }
3138 // CHECK-LABEL: @vftintrp_w_s(
3139 // CHECK-NEXT: entry:
3140 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrp.w.s(<4 x float> [[_1:%.*]])
3141 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vftintrp_w_s(v4f32 _1) { return __builtin_lsx_vftintrp_w_s(_1); }
3144 // CHECK-LABEL: @vftintrp_l_d(
3145 // CHECK-NEXT: entry:
3146 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrp.l.d(<2 x double> [[_1:%.*]])
3147 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vftintrp_l_d(v2f64 _1) { return __builtin_lsx_vftintrp_l_d(_1); }
3150 // CHECK-LABEL: @vftintrm_w_s(
3151 // CHECK-NEXT: entry:
3152 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrm.w.s(<4 x float> [[_1:%.*]])
3153 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vftintrm_w_s(v4f32 _1) { return __builtin_lsx_vftintrm_w_s(_1); }
3156 // CHECK-LABEL: @vftintrm_l_d(
3157 // CHECK-NEXT: entry:
3158 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrm.l.d(<2 x double> [[_1:%.*]])
3159 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vftintrm_l_d(v2f64 _1) { return __builtin_lsx_vftintrm_l_d(_1); }
3162 // CHECK-LABEL: @vftint_w_d(
3163 // CHECK-NEXT: entry:
3164 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftint.w.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
3165 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vftint_w_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vftint_w_d(_1, _2);
}
3170 // CHECK-LABEL: @vffint_s_l(
3171 // CHECK-NEXT: entry:
3172 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vffint.s.l(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3173 // CHECK-NEXT: ret <4 x float> [[TMP0]]
v4f32 vffint_s_l(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vffint_s_l(_1, _2);
}
3178 // CHECK-LABEL: @vftintrz_w_d(
3179 // CHECK-NEXT: entry:
3180 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrz.w.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
3181 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vftintrz_w_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vftintrz_w_d(_1, _2);
}
3186 // CHECK-LABEL: @vftintrp_w_d(
3187 // CHECK-NEXT: entry:
3188 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrp.w.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
3189 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vftintrp_w_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vftintrp_w_d(_1, _2);
}
3194 // CHECK-LABEL: @vftintrm_w_d(
3195 // CHECK-NEXT: entry:
3196 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrm.w.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
3197 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vftintrm_w_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vftintrm_w_d(_1, _2);
}
3202 // CHECK-LABEL: @vftintrne_w_d(
3203 // CHECK-NEXT: entry:
3204 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vftintrne.w.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
3205 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vftintrne_w_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vftintrne_w_d(_1, _2);
}
3210 // CHECK-LABEL: @vftintl_l_s(
3211 // CHECK-NEXT: entry:
3212 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintl.l.s(<4 x float> [[_1:%.*]])
3213 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vftintl_l_s(v4f32 _1) { return __builtin_lsx_vftintl_l_s(_1); }
3216 // CHECK-LABEL: @vftinth_l_s(
3217 // CHECK-NEXT: entry:
3218 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftinth.l.s(<4 x float> [[_1:%.*]])
3219 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vftinth_l_s(v4f32 _1) { return __builtin_lsx_vftinth_l_s(_1); }
3222 // CHECK-LABEL: @vffinth_d_w(
3223 // CHECK-NEXT: entry:
3224 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vffinth.d.w(<4 x i32> [[_1:%.*]])
3225 // CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vffinth_d_w(v4i32 _1) { return __builtin_lsx_vffinth_d_w(_1); }
3228 // CHECK-LABEL: @vffintl_d_w(
3229 // CHECK-NEXT: entry:
3230 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vffintl.d.w(<4 x i32> [[_1:%.*]])
3231 // CHECK-NEXT: ret <2 x double> [[TMP0]]
v2f64 vffintl_d_w(v4i32 _1) { return __builtin_lsx_vffintl_d_w(_1); }
3234 // CHECK-LABEL: @vftintrzl_l_s(
3235 // CHECK-NEXT: entry:
3236 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrzl.l.s(<4 x float> [[_1:%.*]])
3237 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vftintrzl_l_s(v4f32 _1) { return __builtin_lsx_vftintrzl_l_s(_1); }
3240 // CHECK-LABEL: @vftintrzh_l_s(
3241 // CHECK-NEXT: entry:
3242 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrzh.l.s(<4 x float> [[_1:%.*]])
3243 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vftintrzh_l_s(v4f32 _1) { return __builtin_lsx_vftintrzh_l_s(_1); }
3246 // CHECK-LABEL: @vftintrpl_l_s(
3247 // CHECK-NEXT: entry:
3248 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrpl.l.s(<4 x float> [[_1:%.*]])
3249 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vftintrpl_l_s(v4f32 _1) { return __builtin_lsx_vftintrpl_l_s(_1); }
3252 // CHECK-LABEL: @vftintrph_l_s(
3253 // CHECK-NEXT: entry:
3254 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrph.l.s(<4 x float> [[_1:%.*]])
3255 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vftintrph_l_s(v4f32 _1) { return __builtin_lsx_vftintrph_l_s(_1); }
3258 // CHECK-LABEL: @vftintrml_l_s(
3259 // CHECK-NEXT: entry:
3260 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrml.l.s(<4 x float> [[_1:%.*]])
3261 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vftintrml_l_s(v4f32 _1) { return __builtin_lsx_vftintrml_l_s(_1); }
3264 // CHECK-LABEL: @vftintrmh_l_s(
3265 // CHECK-NEXT: entry:
3266 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrmh.l.s(<4 x float> [[_1:%.*]])
3267 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vftintrmh_l_s(v4f32 _1) { return __builtin_lsx_vftintrmh_l_s(_1); }
3270 // CHECK-LABEL: @vftintrnel_l_s(
3271 // CHECK-NEXT: entry:
3272 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrnel.l.s(<4 x float> [[_1:%.*]])
3273 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vftintrnel_l_s(v4f32 _1) {
  return __builtin_lsx_vftintrnel_l_s(_1);
}
3278 // CHECK-LABEL: @vftintrneh_l_s(
3279 // CHECK-NEXT: entry:
3280 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vftintrneh.l.s(<4 x float> [[_1:%.*]])
3281 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vftintrneh_l_s(v4f32 _1) {
  return __builtin_lsx_vftintrneh_l_s(_1);
}
3286 // CHECK-LABEL: @vfrintrne_s(
3287 // CHECK-NEXT: entry:
3288 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfrintrne.s(<4 x float> [[_1:%.*]])
3289 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[TMP0]] to <4 x i32>
3290 // CHECK-NEXT: ret <4 x i32> [[TMP1]]
v4i32 vfrintrne_s(v4f32 _1) { return __builtin_lsx_vfrintrne_s(_1); }
3293 // CHECK-LABEL: @vfrintrne_d(
3294 // CHECK-NEXT: entry:
3295 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfrintrne.d(<2 x double> [[_1:%.*]])
3296 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[TMP0]] to <2 x i64>
3297 // CHECK-NEXT: ret <2 x i64> [[TMP1]]
v2i64 vfrintrne_d(v2f64 _1) { return __builtin_lsx_vfrintrne_d(_1); }
3300 // CHECK-LABEL: @vfrintrz_s(
3301 // CHECK-NEXT: entry:
3302 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfrintrz.s(<4 x float> [[_1:%.*]])
3303 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[TMP0]] to <4 x i32>
3304 // CHECK-NEXT: ret <4 x i32> [[TMP1]]
v4i32 vfrintrz_s(v4f32 _1) { return __builtin_lsx_vfrintrz_s(_1); }
3307 // CHECK-LABEL: @vfrintrz_d(
3308 // CHECK-NEXT: entry:
3309 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfrintrz.d(<2 x double> [[_1:%.*]])
3310 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[TMP0]] to <2 x i64>
3311 // CHECK-NEXT: ret <2 x i64> [[TMP1]]
v2i64 vfrintrz_d(v2f64 _1) { return __builtin_lsx_vfrintrz_d(_1); }
3314 // CHECK-LABEL: @vfrintrp_s(
3315 // CHECK-NEXT: entry:
3316 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfrintrp.s(<4 x float> [[_1:%.*]])
3317 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[TMP0]] to <4 x i32>
3318 // CHECK-NEXT: ret <4 x i32> [[TMP1]]
v4i32 vfrintrp_s(v4f32 _1) { return __builtin_lsx_vfrintrp_s(_1); }
3321 // CHECK-LABEL: @vfrintrp_d(
3322 // CHECK-NEXT: entry:
3323 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfrintrp.d(<2 x double> [[_1:%.*]])
3324 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[TMP0]] to <2 x i64>
3325 // CHECK-NEXT: ret <2 x i64> [[TMP1]]
v2i64 vfrintrp_d(v2f64 _1) { return __builtin_lsx_vfrintrp_d(_1); }
3328 // CHECK-LABEL: @vfrintrm_s(
3329 // CHECK-NEXT: entry:
3330 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.loongarch.lsx.vfrintrm.s(<4 x float> [[_1:%.*]])
3331 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[TMP0]] to <4 x i32>
3332 // CHECK-NEXT: ret <4 x i32> [[TMP1]]
v4i32 vfrintrm_s(v4f32 _1) { return __builtin_lsx_vfrintrm_s(_1); }
3335 // CHECK-LABEL: @vfrintrm_d(
3336 // CHECK-NEXT: entry:
3337 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x double> @llvm.loongarch.lsx.vfrintrm.d(<2 x double> [[_1:%.*]])
3338 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[TMP0]] to <2 x i64>
3339 // CHECK-NEXT: ret <2 x i64> [[TMP1]]
v2i64 vfrintrm_d(v2f64 _1) { return __builtin_lsx_vfrintrm_d(_1); }
3342 // CHECK-LABEL: @vstelm_b(
3343 // CHECK-NEXT: entry:
3344 // CHECK-NEXT: tail call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> [[_1:%.*]], ptr [[_2:%.*]], i32 1, i32 1)
3345 // CHECK-NEXT: ret void
void vstelm_b(v16i8 _1, void *_2) {
  return __builtin_lsx_vstelm_b(_1, _2, 1, 1);
}
3350 // CHECK-LABEL: @vstelm_h(
3351 // CHECK-NEXT: entry:
3352 // CHECK-NEXT: tail call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> [[_1:%.*]], ptr [[_2:%.*]], i32 2, i32 1)
3353 // CHECK-NEXT: ret void
void vstelm_h(v8i16 _1, void *_2) {
  return __builtin_lsx_vstelm_h(_1, _2, 2, 1);
}
3358 // CHECK-LABEL: @vstelm_w(
3359 // CHECK-NEXT: entry:
3360 // CHECK-NEXT: tail call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> [[_1:%.*]], ptr [[_2:%.*]], i32 4, i32 1)
3361 // CHECK-NEXT: ret void
void vstelm_w(v4i32 _1, void *_2) {
  return __builtin_lsx_vstelm_w(_1, _2, 4, 1);
}
3366 // CHECK-LABEL: @vstelm_d(
3367 // CHECK-NEXT: entry:
3368 // CHECK-NEXT: tail call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> [[_1:%.*]], ptr [[_2:%.*]], i32 8, i32 1)
3369 // CHECK-NEXT: ret void
void vstelm_d(v2i64 _1, void *_2) {
  return __builtin_lsx_vstelm_d(_1, _2, 8, 1);
}
3374 // CHECK-LABEL: @vaddwev_d_w(
3375 // CHECK-NEXT: entry:
3376 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwev.d.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3377 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vaddwev_d_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vaddwev_d_w(_1, _2);
}
3382 // CHECK-LABEL: @vaddwev_w_h(
3383 // CHECK-NEXT: entry:
3384 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vaddwev.w.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3385 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vaddwev_w_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vaddwev_w_h(_1, _2);
}
3390 // CHECK-LABEL: @vaddwev_h_b(
3391 // CHECK-NEXT: entry:
3392 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vaddwev.h.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3393 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vaddwev_h_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vaddwev_h_b(_1, _2);
}
3398 // CHECK-LABEL: @vaddwod_d_w(
3399 // CHECK-NEXT: entry:
3400 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwod.d.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3401 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vaddwod_d_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vaddwod_d_w(_1, _2);
}
3406 // CHECK-LABEL: @vaddwod_w_h(
3407 // CHECK-NEXT: entry:
3408 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vaddwod.w.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3409 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vaddwod_w_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vaddwod_w_h(_1, _2);
}
3414 // CHECK-LABEL: @vaddwod_h_b(
3415 // CHECK-NEXT: entry:
3416 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vaddwod.h.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3417 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vaddwod_h_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vaddwod_h_b(_1, _2);
}
3422 // CHECK-LABEL: @vaddwev_d_wu(
3423 // CHECK-NEXT: entry:
3424 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwev.d.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3425 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vaddwev_d_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vaddwev_d_wu(_1, _2);
}
3430 // CHECK-LABEL: @vaddwev_w_hu(
3431 // CHECK-NEXT: entry:
3432 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vaddwev.w.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3433 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vaddwev_w_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vaddwev_w_hu(_1, _2);
}
3438 // CHECK-LABEL: @vaddwev_h_bu(
3439 // CHECK-NEXT: entry:
3440 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vaddwev.h.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3441 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vaddwev_h_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vaddwev_h_bu(_1, _2);
}
3446 // CHECK-LABEL: @vaddwod_d_wu(
3447 // CHECK-NEXT: entry:
3448 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwod.d.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3449 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vaddwod_d_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vaddwod_d_wu(_1, _2);
}
3454 // CHECK-LABEL: @vaddwod_w_hu(
3455 // CHECK-NEXT: entry:
3456 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vaddwod.w.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3457 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vaddwod_w_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vaddwod_w_hu(_1, _2);
}
3462 // CHECK-LABEL: @vaddwod_h_bu(
3463 // CHECK-NEXT: entry:
3464 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vaddwod.h.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3465 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vaddwod_h_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vaddwod_h_bu(_1, _2);
}
3470 // CHECK-LABEL: @vaddwev_d_wu_w(
3471 // CHECK-NEXT: entry:
3472 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwev.d.wu.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3473 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vaddwev_d_wu_w(v4u32 _1, v4i32 _2) {
  return __builtin_lsx_vaddwev_d_wu_w(_1, _2);
}
3478 // CHECK-LABEL: @vaddwev_w_hu_h(
3479 // CHECK-NEXT: entry:
3480 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vaddwev.w.hu.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3481 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vaddwev_w_hu_h(v8u16 _1, v8i16 _2) {
  return __builtin_lsx_vaddwev_w_hu_h(_1, _2);
}
3486 // CHECK-LABEL: @vaddwev_h_bu_b(
3487 // CHECK-NEXT: entry:
3488 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vaddwev.h.bu.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3489 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vaddwev_h_bu_b(v16u8 _1, v16i8 _2) {
  return __builtin_lsx_vaddwev_h_bu_b(_1, _2);
}
3494 // CHECK-LABEL: @vaddwod_d_wu_w(
3495 // CHECK-NEXT: entry:
3496 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwod.d.wu.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3497 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vaddwod_d_wu_w(v4u32 _1, v4i32 _2) {
  return __builtin_lsx_vaddwod_d_wu_w(_1, _2);
}
3502 // CHECK-LABEL: @vaddwod_w_hu_h(
3503 // CHECK-NEXT: entry:
3504 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vaddwod.w.hu.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3505 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vaddwod_w_hu_h(v8u16 _1, v8i16 _2) {
  return __builtin_lsx_vaddwod_w_hu_h(_1, _2);
}
3510 // CHECK-LABEL: @vaddwod_h_bu_b(
3511 // CHECK-NEXT: entry:
3512 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vaddwod.h.bu.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3513 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vaddwod_h_bu_b(v16u8 _1, v16i8 _2) {
  return __builtin_lsx_vaddwod_h_bu_b(_1, _2);
}
3518 // CHECK-LABEL: @vsubwev_d_w(
3519 // CHECK-NEXT: entry:
3520 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwev.d.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3521 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsubwev_d_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vsubwev_d_w(_1, _2);
}
3526 // CHECK-LABEL: @vsubwev_w_h(
3527 // CHECK-NEXT: entry:
3528 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsubwev.w.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3529 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsubwev_w_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vsubwev_w_h(_1, _2);
}
3534 // CHECK-LABEL: @vsubwev_h_b(
3535 // CHECK-NEXT: entry:
3536 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsubwev.h.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3537 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsubwev_h_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vsubwev_h_b(_1, _2);
}
3542 // CHECK-LABEL: @vsubwod_d_w(
3543 // CHECK-NEXT: entry:
3544 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwod.d.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3545 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsubwod_d_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vsubwod_d_w(_1, _2);
}
3550 // CHECK-LABEL: @vsubwod_w_h(
3551 // CHECK-NEXT: entry:
3552 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsubwod.w.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3553 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsubwod_w_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vsubwod_w_h(_1, _2);
}
3558 // CHECK-LABEL: @vsubwod_h_b(
3559 // CHECK-NEXT: entry:
3560 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsubwod.h.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3561 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsubwod_h_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vsubwod_h_b(_1, _2);
}
3566 // CHECK-LABEL: @vsubwev_d_wu(
3567 // CHECK-NEXT: entry:
3568 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwev.d.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3569 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsubwev_d_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vsubwev_d_wu(_1, _2);
}
3574 // CHECK-LABEL: @vsubwev_w_hu(
3575 // CHECK-NEXT: entry:
3576 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsubwev.w.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3577 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsubwev_w_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vsubwev_w_hu(_1, _2);
}
3582 // CHECK-LABEL: @vsubwev_h_bu(
3583 // CHECK-NEXT: entry:
3584 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsubwev.h.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3585 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsubwev_h_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vsubwev_h_bu(_1, _2);
}
3590 // CHECK-LABEL: @vsubwod_d_wu(
3591 // CHECK-NEXT: entry:
3592 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwod.d.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3593 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsubwod_d_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vsubwod_d_wu(_1, _2);
}
3598 // CHECK-LABEL: @vsubwod_w_hu(
3599 // CHECK-NEXT: entry:
3600 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsubwod.w.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3601 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsubwod_w_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vsubwod_w_hu(_1, _2);
}
3606 // CHECK-LABEL: @vsubwod_h_bu(
3607 // CHECK-NEXT: entry:
3608 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsubwod.h.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3609 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsubwod_h_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vsubwod_h_bu(_1, _2);
}
3614 // CHECK-LABEL: @vaddwev_q_d(
3615 // CHECK-NEXT: entry:
3616 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwev.q.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3617 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vaddwev_q_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vaddwev_q_d(_1, _2);
}
3622 // CHECK-LABEL: @vaddwod_q_d(
3623 // CHECK-NEXT: entry:
3624 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwod.q.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3625 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vaddwod_q_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vaddwod_q_d(_1, _2);
}
3630 // CHECK-LABEL: @vaddwev_q_du(
3631 // CHECK-NEXT: entry:
3632 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwev.q.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3633 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vaddwev_q_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vaddwev_q_du(_1, _2);
}
3638 // CHECK-LABEL: @vaddwod_q_du(
3639 // CHECK-NEXT: entry:
3640 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwod.q.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3641 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vaddwod_q_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vaddwod_q_du(_1, _2);
}
3646 // CHECK-LABEL: @vsubwev_q_d(
3647 // CHECK-NEXT: entry:
3648 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwev.q.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3649 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsubwev_q_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vsubwev_q_d(_1, _2);
}
3654 // CHECK-LABEL: @vsubwod_q_d(
3655 // CHECK-NEXT: entry:
3656 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwod.q.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3657 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsubwod_q_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vsubwod_q_d(_1, _2);
}
3662 // CHECK-LABEL: @vsubwev_q_du(
3663 // CHECK-NEXT: entry:
3664 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwev.q.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3665 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsubwev_q_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vsubwev_q_du(_1, _2);
}
3670 // CHECK-LABEL: @vsubwod_q_du(
3671 // CHECK-NEXT: entry:
3672 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsubwod.q.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3673 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsubwod_q_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vsubwod_q_du(_1, _2);
}
3678 // CHECK-LABEL: @vaddwev_q_du_d(
3679 // CHECK-NEXT: entry:
3680 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwev.q.du.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3681 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vaddwev_q_du_d(v2u64 _1, v2i64 _2) {
  return __builtin_lsx_vaddwev_q_du_d(_1, _2);
}
3686 // CHECK-LABEL: @vaddwod_q_du_d(
3687 // CHECK-NEXT: entry:
3688 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vaddwod.q.du.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3689 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vaddwod_q_du_d(v2u64 _1, v2i64 _2) {
  return __builtin_lsx_vaddwod_q_du_d(_1, _2);
}
3694 // CHECK-LABEL: @vmulwev_d_w(
3695 // CHECK-NEXT: entry:
3696 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwev.d.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3697 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmulwev_d_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vmulwev_d_w(_1, _2);
}
3702 // CHECK-LABEL: @vmulwev_w_h(
3703 // CHECK-NEXT: entry:
3704 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmulwev.w.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3705 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmulwev_w_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vmulwev_w_h(_1, _2);
}
3710 // CHECK-LABEL: @vmulwev_h_b(
3711 // CHECK-NEXT: entry:
3712 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmulwev.h.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3713 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmulwev_h_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vmulwev_h_b(_1, _2);
}
3718 // CHECK-LABEL: @vmulwod_d_w(
3719 // CHECK-NEXT: entry:
3720 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwod.d.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3721 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmulwod_d_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vmulwod_d_w(_1, _2);
}
3726 // CHECK-LABEL: @vmulwod_w_h(
3727 // CHECK-NEXT: entry:
3728 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmulwod.w.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3729 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmulwod_w_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vmulwod_w_h(_1, _2);
}
3734 // CHECK-LABEL: @vmulwod_h_b(
3735 // CHECK-NEXT: entry:
3736 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmulwod.h.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3737 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmulwod_h_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vmulwod_h_b(_1, _2);
}
3742 // CHECK-LABEL: @vmulwev_d_wu(
3743 // CHECK-NEXT: entry:
3744 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwev.d.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3745 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmulwev_d_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vmulwev_d_wu(_1, _2);
}
3750 // CHECK-LABEL: @vmulwev_w_hu(
3751 // CHECK-NEXT: entry:
3752 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmulwev.w.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3753 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmulwev_w_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vmulwev_w_hu(_1, _2);
}
3758 // CHECK-LABEL: @vmulwev_h_bu(
3759 // CHECK-NEXT: entry:
3760 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmulwev.h.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3761 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmulwev_h_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vmulwev_h_bu(_1, _2);
}
3766 // CHECK-LABEL: @vmulwod_d_wu(
3767 // CHECK-NEXT: entry:
3768 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwod.d.wu(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3769 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmulwod_d_wu(v4u32 _1, v4u32 _2) {
  return __builtin_lsx_vmulwod_d_wu(_1, _2);
}
3774 // CHECK-LABEL: @vmulwod_w_hu(
3775 // CHECK-NEXT: entry:
3776 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmulwod.w.hu(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3777 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmulwod_w_hu(v8u16 _1, v8u16 _2) {
  return __builtin_lsx_vmulwod_w_hu(_1, _2);
}
3782 // CHECK-LABEL: @vmulwod_h_bu(
3783 // CHECK-NEXT: entry:
3784 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmulwod.h.bu(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3785 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmulwod_h_bu(v16u8 _1, v16u8 _2) {
  return __builtin_lsx_vmulwod_h_bu(_1, _2);
}
3790 // CHECK-LABEL: @vmulwev_d_wu_w(
3791 // CHECK-NEXT: entry:
3792 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwev.d.wu.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3793 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmulwev_d_wu_w(v4u32 _1, v4i32 _2) {
  return __builtin_lsx_vmulwev_d_wu_w(_1, _2);
}
3798 // CHECK-LABEL: @vmulwev_w_hu_h(
3799 // CHECK-NEXT: entry:
3800 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmulwev.w.hu.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3801 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmulwev_w_hu_h(v8u16 _1, v8i16 _2) {
  return __builtin_lsx_vmulwev_w_hu_h(_1, _2);
}
3806 // CHECK-LABEL: @vmulwev_h_bu_b(
3807 // CHECK-NEXT: entry:
3808 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmulwev.h.bu.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3809 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmulwev_h_bu_b(v16u8 _1, v16i8 _2) {
  return __builtin_lsx_vmulwev_h_bu_b(_1, _2);
}
3814 // CHECK-LABEL: @vmulwod_d_wu_w(
3815 // CHECK-NEXT: entry:
3816 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwod.d.wu.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
3817 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmulwod_d_wu_w(v4u32 _1, v4i32 _2) {
  return __builtin_lsx_vmulwod_d_wu_w(_1, _2);
}
3822 // CHECK-LABEL: @vmulwod_w_hu_h(
3823 // CHECK-NEXT: entry:
3824 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmulwod.w.hu.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
3825 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmulwod_w_hu_h(v8u16 _1, v8i16 _2) {
  return __builtin_lsx_vmulwod_w_hu_h(_1, _2);
}
3830 // CHECK-LABEL: @vmulwod_h_bu_b(
3831 // CHECK-NEXT: entry:
3832 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmulwod.h.bu.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
3833 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmulwod_h_bu_b(v16u8 _1, v16i8 _2) {
  return __builtin_lsx_vmulwod_h_bu_b(_1, _2);
}
3838 // CHECK-LABEL: @vmulwev_q_d(
3839 // CHECK-NEXT: entry:
3840 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwev.q.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3841 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmulwev_q_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vmulwev_q_d(_1, _2);
}
3846 // CHECK-LABEL: @vmulwod_q_d(
3847 // CHECK-NEXT: entry:
3848 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwod.q.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3849 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmulwod_q_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vmulwod_q_d(_1, _2);
}
3854 // CHECK-LABEL: @vmulwev_q_du(
3855 // CHECK-NEXT: entry:
3856 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwev.q.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3857 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmulwev_q_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vmulwev_q_du(_1, _2);
}
3862 // CHECK-LABEL: @vmulwod_q_du(
3863 // CHECK-NEXT: entry:
3864 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwod.q.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3865 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmulwod_q_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vmulwod_q_du(_1, _2);
}
3870 // CHECK-LABEL: @vmulwev_q_du_d(
3871 // CHECK-NEXT: entry:
3872 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwev.q.du.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3873 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmulwev_q_du_d(v2u64 _1, v2i64 _2) {
  return __builtin_lsx_vmulwev_q_du_d(_1, _2);
}
3878 // CHECK-LABEL: @vmulwod_q_du_d(
3879 // CHECK-NEXT: entry:
3880 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmulwod.q.du.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3881 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmulwod_q_du_d(v2u64 _1, v2i64 _2) {
  return __builtin_lsx_vmulwod_q_du_d(_1, _2);
}
3886 // CHECK-LABEL: @vhaddw_q_d(
3887 // CHECK-NEXT: entry:
3888 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhaddw.q.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3889 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vhaddw_q_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vhaddw_q_d(_1, _2);
}
3894 // CHECK-LABEL: @vhaddw_qu_du(
3895 // CHECK-NEXT: entry:
3896 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhaddw.qu.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3897 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vhaddw_qu_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vhaddw_qu_du(_1, _2);
}
3902 // CHECK-LABEL: @vhsubw_q_d(
3903 // CHECK-NEXT: entry:
3904 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhsubw.q.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3905 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vhsubw_q_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vhsubw_q_d(_1, _2);
}
3910 // CHECK-LABEL: @vhsubw_qu_du(
3911 // CHECK-NEXT: entry:
3912 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vhsubw.qu.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
3913 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vhsubw_qu_du(v2u64 _1, v2u64 _2) {
  return __builtin_lsx_vhsubw_qu_du(_1, _2);
}
3918 // CHECK-LABEL: @vmaddwev_d_w(
3919 // CHECK-NEXT: entry:
3920 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.w(<2 x i64> [[_1:%.*]], <4 x i32> [[_2:%.*]], <4 x i32> [[_3:%.*]])
3921 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmaddwev_d_w(v2i64 _1, v4i32 _2, v4i32 _3) {
  return __builtin_lsx_vmaddwev_d_w(_1, _2, _3);
}
3926 // CHECK-LABEL: @vmaddwev_w_h(
3927 // CHECK-NEXT: entry:
3928 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.h(<4 x i32> [[_1:%.*]], <8 x i16> [[_2:%.*]], <8 x i16> [[_3:%.*]])
3929 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmaddwev_w_h(v4i32 _1, v8i16 _2, v8i16 _3) {
  return __builtin_lsx_vmaddwev_w_h(_1, _2, _3);
}
3934 // CHECK-LABEL: @vmaddwev_h_b(
3935 // CHECK-NEXT: entry:
3936 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.b(<8 x i16> [[_1:%.*]], <16 x i8> [[_2:%.*]], <16 x i8> [[_3:%.*]])
3937 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmaddwev_h_b(v8i16 _1, v16i8 _2, v16i8 _3) {
  return __builtin_lsx_vmaddwev_h_b(_1, _2, _3);
}
3942 // CHECK-LABEL: @vmaddwev_d_wu(
3943 // CHECK-NEXT: entry:
3944 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.wu(<2 x i64> [[_1:%.*]], <4 x i32> [[_2:%.*]], <4 x i32> [[_3:%.*]])
3945 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vmaddwev_d_wu(v2u64 _1, v4u32 _2, v4u32 _3) {
  return __builtin_lsx_vmaddwev_d_wu(_1, _2, _3);
}
3950 // CHECK-LABEL: @vmaddwev_w_hu(
3951 // CHECK-NEXT: entry:
3952 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.hu(<4 x i32> [[_1:%.*]], <8 x i16> [[_2:%.*]], <8 x i16> [[_3:%.*]])
3953 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vmaddwev_w_hu(v4u32 _1, v8u16 _2, v8u16 _3) {
  return __builtin_lsx_vmaddwev_w_hu(_1, _2, _3);
}
3958 // CHECK-LABEL: @vmaddwev_h_bu(
3959 // CHECK-NEXT: entry:
3960 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.bu(<8 x i16> [[_1:%.*]], <16 x i8> [[_2:%.*]], <16 x i8> [[_3:%.*]])
3961 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vmaddwev_h_bu(v8u16 _1, v16u8 _2, v16u8 _3) {
  return __builtin_lsx_vmaddwev_h_bu(_1, _2, _3);
}
3966 // CHECK-LABEL: @vmaddwod_d_w(
3967 // CHECK-NEXT: entry:
3968 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.w(<2 x i64> [[_1:%.*]], <4 x i32> [[_2:%.*]], <4 x i32> [[_3:%.*]])
3969 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmaddwod_d_w(v2i64 _1, v4i32 _2, v4i32 _3) {
  return __builtin_lsx_vmaddwod_d_w(_1, _2, _3);
}
3974 // CHECK-LABEL: @vmaddwod_w_h(
3975 // CHECK-NEXT: entry:
3976 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.h(<4 x i32> [[_1:%.*]], <8 x i16> [[_2:%.*]], <8 x i16> [[_3:%.*]])
3977 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmaddwod_w_h(v4i32 _1, v8i16 _2, v8i16 _3) {
  return __builtin_lsx_vmaddwod_w_h(_1, _2, _3);
}
3982 // CHECK-LABEL: @vmaddwod_h_b(
3983 // CHECK-NEXT: entry:
3984 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.b(<8 x i16> [[_1:%.*]], <16 x i8> [[_2:%.*]], <16 x i8> [[_3:%.*]])
3985 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmaddwod_h_b(v8i16 _1, v16i8 _2, v16i8 _3) {
  return __builtin_lsx_vmaddwod_h_b(_1, _2, _3);
}
3990 // CHECK-LABEL: @vmaddwod_d_wu(
3991 // CHECK-NEXT: entry:
3992 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.wu(<2 x i64> [[_1:%.*]], <4 x i32> [[_2:%.*]], <4 x i32> [[_3:%.*]])
3993 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vmaddwod_d_wu(v2u64 _1, v4u32 _2, v4u32 _3) {
  return __builtin_lsx_vmaddwod_d_wu(_1, _2, _3);
}
3998 // CHECK-LABEL: @vmaddwod_w_hu(
3999 // CHECK-NEXT: entry:
4000 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.hu(<4 x i32> [[_1:%.*]], <8 x i16> [[_2:%.*]], <8 x i16> [[_3:%.*]])
4001 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vmaddwod_w_hu(v4u32 _1, v8u16 _2, v8u16 _3) {
  return __builtin_lsx_vmaddwod_w_hu(_1, _2, _3);
}
4006 // CHECK-LABEL: @vmaddwod_h_bu(
4007 // CHECK-NEXT: entry:
4008 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.bu(<8 x i16> [[_1:%.*]], <16 x i8> [[_2:%.*]], <16 x i8> [[_3:%.*]])
4009 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vmaddwod_h_bu(v8u16 _1, v16u8 _2, v16u8 _3) {
  return __builtin_lsx_vmaddwod_h_bu(_1, _2, _3);
}
4014 // CHECK-LABEL: @vmaddwev_d_wu_w(
4015 // CHECK-NEXT: entry:
4016 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.wu.w(<2 x i64> [[_1:%.*]], <4 x i32> [[_2:%.*]], <4 x i32> [[_3:%.*]])
4017 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmaddwev_d_wu_w(v2i64 _1, v4u32 _2, v4i32 _3) {
  return __builtin_lsx_vmaddwev_d_wu_w(_1, _2, _3);
}
4022 // CHECK-LABEL: @vmaddwev_w_hu_h(
4023 // CHECK-NEXT: entry:
4024 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.hu.h(<4 x i32> [[_1:%.*]], <8 x i16> [[_2:%.*]], <8 x i16> [[_3:%.*]])
4025 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmaddwev_w_hu_h(v4i32 _1, v8u16 _2, v8i16 _3) {
  return __builtin_lsx_vmaddwev_w_hu_h(_1, _2, _3);
}
4030 // CHECK-LABEL: @vmaddwev_h_bu_b(
4031 // CHECK-NEXT: entry:
4032 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.bu.b(<8 x i16> [[_1:%.*]], <16 x i8> [[_2:%.*]], <16 x i8> [[_3:%.*]])
4033 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmaddwev_h_bu_b(v8i16 _1, v16u8 _2, v16i8 _3) {
  return __builtin_lsx_vmaddwev_h_bu_b(_1, _2, _3);
}
4038 // CHECK-LABEL: @vmaddwod_d_wu_w(
4039 // CHECK-NEXT: entry:
4040 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.wu.w(<2 x i64> [[_1:%.*]], <4 x i32> [[_2:%.*]], <4 x i32> [[_3:%.*]])
4041 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmaddwod_d_wu_w(v2i64 _1, v4u32 _2, v4i32 _3) {
  return __builtin_lsx_vmaddwod_d_wu_w(_1, _2, _3);
}
4046 // CHECK-LABEL: @vmaddwod_w_hu_h(
4047 // CHECK-NEXT: entry:
4048 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.hu.h(<4 x i32> [[_1:%.*]], <8 x i16> [[_2:%.*]], <8 x i16> [[_3:%.*]])
4049 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vmaddwod_w_hu_h(v4i32 _1, v8u16 _2, v8i16 _3) {
  return __builtin_lsx_vmaddwod_w_hu_h(_1, _2, _3);
}
4054 // CHECK-LABEL: @vmaddwod_h_bu_b(
4055 // CHECK-NEXT: entry:
4056 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.bu.b(<8 x i16> [[_1:%.*]], <16 x i8> [[_2:%.*]], <16 x i8> [[_3:%.*]])
4057 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vmaddwod_h_bu_b(v8i16 _1, v16u8 _2, v16i8 _3) {
  return __builtin_lsx_vmaddwod_h_bu_b(_1, _2, _3);
}
4062 // CHECK-LABEL: @vmaddwev_q_d(
4063 // CHECK-NEXT: entry:
4064 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], <2 x i64> [[_3:%.*]])
4065 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmaddwev_q_d(v2i64 _1, v2i64 _2, v2i64 _3) {
  return __builtin_lsx_vmaddwev_q_d(_1, _2, _3);
}
4070 // CHECK-LABEL: @vmaddwod_q_d(
4071 // CHECK-NEXT: entry:
4072 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], <2 x i64> [[_3:%.*]])
4073 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmaddwod_q_d(v2i64 _1, v2i64 _2, v2i64 _3) {
  return __builtin_lsx_vmaddwod_q_d(_1, _2, _3);
}
4078 // CHECK-LABEL: @vmaddwev_q_du(
4079 // CHECK-NEXT: entry:
4080 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], <2 x i64> [[_3:%.*]])
4081 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vmaddwev_q_du(v2u64 _1, v2u64 _2, v2u64 _3) {
  return __builtin_lsx_vmaddwev_q_du(_1, _2, _3);
}
4086 // CHECK-LABEL: @vmaddwod_q_du(
4087 // CHECK-NEXT: entry:
4088 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.du(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], <2 x i64> [[_3:%.*]])
4089 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vmaddwod_q_du(v2u64 _1, v2u64 _2, v2u64 _3) {
  return __builtin_lsx_vmaddwod_q_du(_1, _2, _3);
}
4094 // CHECK-LABEL: @vmaddwev_q_du_d(
4095 // CHECK-NEXT: entry:
4096 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.du.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], <2 x i64> [[_3:%.*]])
4097 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmaddwev_q_du_d(v2i64 _1, v2u64 _2, v2i64 _3) {
  return __builtin_lsx_vmaddwev_q_du_d(_1, _2, _3);
}
4102 // CHECK-LABEL: @vmaddwod_q_du_d(
4103 // CHECK-NEXT: entry:
4104 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.du.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], <2 x i64> [[_3:%.*]])
4105 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vmaddwod_q_du_d(v2i64 _1, v2u64 _2, v2i64 _3) {
  return __builtin_lsx_vmaddwod_q_du_d(_1, _2, _3);
}
4110 // CHECK-LABEL: @vrotr_b(
4111 // CHECK-NEXT: entry:
4112 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vrotr.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
4113 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vrotr_b(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vrotr_b(_1, _2);
}
4118 // CHECK-LABEL: @vrotr_h(
4119 // CHECK-NEXT: entry:
4120 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vrotr.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
4121 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vrotr_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vrotr_h(_1, _2);
}
4126 // CHECK-LABEL: @vrotr_w(
4127 // CHECK-NEXT: entry:
4128 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vrotr.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
4129 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vrotr_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vrotr_w(_1, _2);
}
4134 // CHECK-LABEL: @vrotr_d(
4135 // CHECK-NEXT: entry:
4136 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vrotr.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
4137 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vrotr_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vrotr_d(_1, _2);
}
4142 // CHECK-LABEL: @vadd_q(
4143 // CHECK-NEXT: entry:
4144 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vadd.q(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
4145 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vadd_q(v2i64 _1, v2i64 _2) { return __builtin_lsx_vadd_q(_1, _2); }
4148 // CHECK-LABEL: @vsub_q(
4149 // CHECK-NEXT: entry:
4150 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsub.q(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
4151 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsub_q(v2i64 _1, v2i64 _2) { return __builtin_lsx_vsub_q(_1, _2); }
4154 // CHECK-LABEL: @vldrepl_b(
4155 // CHECK-NEXT: entry:
4156 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vldrepl.b(ptr [[_1:%.*]], i32 1)
4157 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vldrepl_b(void *_1) { return __builtin_lsx_vldrepl_b(_1, 1); }
4160 // CHECK-LABEL: @vldrepl_h(
4161 // CHECK-NEXT: entry:
4162 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vldrepl.h(ptr [[_1:%.*]], i32 2)
4163 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vldrepl_h(void *_1) { return __builtin_lsx_vldrepl_h(_1, 2); }
4166 // CHECK-LABEL: @vldrepl_w(
4167 // CHECK-NEXT: entry:
4168 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vldrepl.w(ptr [[_1:%.*]], i32 4)
4169 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vldrepl_w(void *_1) { return __builtin_lsx_vldrepl_w(_1, 4); }
4172 // CHECK-LABEL: @vldrepl_d(
4173 // CHECK-NEXT: entry:
4174 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vldrepl.d(ptr [[_1:%.*]], i32 8)
4175 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vldrepl_d(void *_1) { return __builtin_lsx_vldrepl_d(_1, 8); }
4178 // CHECK-LABEL: @vmskgez_b(
4179 // CHECK-NEXT: entry:
4180 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmskgez.b(<16 x i8> [[_1:%.*]])
4181 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vmskgez_b(v16i8 _1) { return __builtin_lsx_vmskgez_b(_1); }
4184 // CHECK-LABEL: @vmsknz_b(
4185 // CHECK-NEXT: entry:
4186 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vmsknz.b(<16 x i8> [[_1:%.*]])
4187 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vmsknz_b(v16i8 _1) { return __builtin_lsx_vmsknz_b(_1); }
4190 // CHECK-LABEL: @vexth_h_b(
4191 // CHECK-NEXT: entry:
4192 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vexth.h.b(<16 x i8> [[_1:%.*]])
4193 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vexth_h_b(v16i8 _1) { return __builtin_lsx_vexth_h_b(_1); }
4196 // CHECK-LABEL: @vexth_w_h(
4197 // CHECK-NEXT: entry:
4198 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vexth.w.h(<8 x i16> [[_1:%.*]])
4199 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vexth_w_h(v8i16 _1) { return __builtin_lsx_vexth_w_h(_1); }
4202 // CHECK-LABEL: @vexth_d_w(
4203 // CHECK-NEXT: entry:
4204 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vexth.d.w(<4 x i32> [[_1:%.*]])
4205 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vexth_d_w(v4i32 _1) { return __builtin_lsx_vexth_d_w(_1); }
4208 // CHECK-LABEL: @vexth_q_d(
4209 // CHECK-NEXT: entry:
4210 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vexth.q.d(<2 x i64> [[_1:%.*]])
4211 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vexth_q_d(v2i64 _1) { return __builtin_lsx_vexth_q_d(_1); }
4214 // CHECK-LABEL: @vexth_hu_bu(
4215 // CHECK-NEXT: entry:
4216 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vexth.hu.bu(<16 x i8> [[_1:%.*]])
4217 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vexth_hu_bu(v16u8 _1) { return __builtin_lsx_vexth_hu_bu(_1); }
4220 // CHECK-LABEL: @vexth_wu_hu(
4221 // CHECK-NEXT: entry:
4222 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vexth.wu.hu(<8 x i16> [[_1:%.*]])
4223 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vexth_wu_hu(v8u16 _1) { return __builtin_lsx_vexth_wu_hu(_1); }
4226 // CHECK-LABEL: @vexth_du_wu(
4227 // CHECK-NEXT: entry:
4228 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vexth.du.wu(<4 x i32> [[_1:%.*]])
4229 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vexth_du_wu(v4u32 _1) { return __builtin_lsx_vexth_du_wu(_1); }
4232 // CHECK-LABEL: @vexth_qu_du(
4233 // CHECK-NEXT: entry:
4234 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vexth.qu.du(<2 x i64> [[_1:%.*]])
4235 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vexth_qu_du(v2u64 _1) { return __builtin_lsx_vexth_qu_du(_1); }
4238 // CHECK-LABEL: @vrotri_b(
4239 // CHECK-NEXT: entry:
4240 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vrotri.b(<16 x i8> [[_1:%.*]], i32 1)
4241 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vrotri_b(v16i8 _1) { return __builtin_lsx_vrotri_b(_1, 1); }
4244 // CHECK-LABEL: @vrotri_h(
4245 // CHECK-NEXT: entry:
4246 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vrotri.h(<8 x i16> [[_1:%.*]], i32 1)
4247 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vrotri_h(v8i16 _1) { return __builtin_lsx_vrotri_h(_1, 1); }
4250 // CHECK-LABEL: @vrotri_w(
4251 // CHECK-NEXT: entry:
4252 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vrotri.w(<4 x i32> [[_1:%.*]], i32 1)
4253 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vrotri_w(v4i32 _1) { return __builtin_lsx_vrotri_w(_1, 1); }
4256 // CHECK-LABEL: @vrotri_d(
4257 // CHECK-NEXT: entry:
4258 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vrotri.d(<2 x i64> [[_1:%.*]], i32 1)
4259 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vrotri_d(v2i64 _1) { return __builtin_lsx_vrotri_d(_1, 1); }
4262 // CHECK-LABEL: @vextl_q_d(
4263 // CHECK-NEXT: entry:
4264 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vextl.q.d(<2 x i64> [[_1:%.*]])
4265 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vextl_q_d(v2i64 _1) { return __builtin_lsx_vextl_q_d(_1); }
4268 // CHECK-LABEL: @vsrlni_b_h(
4269 // CHECK-NEXT: entry:
4270 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrlni.b.h(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], i32 1)
4271 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsrlni_b_h(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vsrlni_b_h(_1, _2, 1);
}
4276 // CHECK-LABEL: @vsrlni_h_w(
4277 // CHECK-NEXT: entry:
4278 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrlni.h.w(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], i32 1)
4279 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsrlni_h_w(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vsrlni_h_w(_1, _2, 1);
}
4284 // CHECK-LABEL: @vsrlni_w_d(
4285 // CHECK-NEXT: entry:
4286 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrlni.w.d(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], i32 1)
4287 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsrlni_w_d(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vsrlni_w_d(_1, _2, 1);
}
4292 // CHECK-LABEL: @vsrlni_d_q(
4293 // CHECK-NEXT: entry:
4294 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrlni.d.q(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], i32 1)
4295 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsrlni_d_q(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vsrlni_d_q(_1, _2, 1);
}
4300 // CHECK-LABEL: @vsrlrni_b_h(
4301 // CHECK-NEXT: entry:
4302 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrlrni.b.h(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], i32 1)
4303 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsrlrni_b_h(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vsrlrni_b_h(_1, _2, 1);
}
4308 // CHECK-LABEL: @vsrlrni_h_w(
4309 // CHECK-NEXT: entry:
4310 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrlrni.h.w(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], i32 1)
4311 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsrlrni_h_w(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vsrlrni_h_w(_1, _2, 1);
}
4316 // CHECK-LABEL: @vsrlrni_w_d(
4317 // CHECK-NEXT: entry:
4318 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrlrni.w.d(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], i32 1)
4319 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsrlrni_w_d(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vsrlrni_w_d(_1, _2, 1);
}
4324 // CHECK-LABEL: @vsrlrni_d_q(
4325 // CHECK-NEXT: entry:
4326 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrlrni.d.q(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], i32 1)
4327 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsrlrni_d_q(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vsrlrni_d_q(_1, _2, 1);
}
4332 // CHECK-LABEL: @vssrlni_b_h(
4333 // CHECK-NEXT: entry:
4334 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrlni.b.h(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], i32 1)
4335 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vssrlni_b_h(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vssrlni_b_h(_1, _2, 1);
}
4340 // CHECK-LABEL: @vssrlni_h_w(
4341 // CHECK-NEXT: entry:
4342 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrlni.h.w(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], i32 1)
4343 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vssrlni_h_w(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vssrlni_h_w(_1, _2, 1);
}
4348 // CHECK-LABEL: @vssrlni_w_d(
4349 // CHECK-NEXT: entry:
4350 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrlni.w.d(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], i32 1)
4351 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vssrlni_w_d(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vssrlni_w_d(_1, _2, 1);
}
4356 // CHECK-LABEL: @vssrlni_d_q(
4357 // CHECK-NEXT: entry:
4358 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrlni.d.q(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], i32 1)
4359 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vssrlni_d_q(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vssrlni_d_q(_1, _2, 1);
}
4364 // CHECK-LABEL: @vssrlni_bu_h(
4365 // CHECK-NEXT: entry:
4366 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrlni.bu.h(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], i32 1)
4367 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vssrlni_bu_h(v16u8 _1, v16i8 _2) {
  return __builtin_lsx_vssrlni_bu_h(_1, _2, 1);
}
4372 // CHECK-LABEL: @vssrlni_hu_w(
4373 // CHECK-NEXT: entry:
4374 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrlni.hu.w(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], i32 1)
4375 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vssrlni_hu_w(v8u16 _1, v8i16 _2) {
  return __builtin_lsx_vssrlni_hu_w(_1, _2, 1);
}
4380 // CHECK-LABEL: @vssrlni_wu_d(
4381 // CHECK-NEXT: entry:
4382 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrlni.wu.d(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], i32 1)
4383 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vssrlni_wu_d(v4u32 _1, v4i32 _2) {
  return __builtin_lsx_vssrlni_wu_d(_1, _2, 1);
}
4388 // CHECK-LABEL: @vssrlni_du_q(
4389 // CHECK-NEXT: entry:
4390 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrlni.du.q(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], i32 1)
4391 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vssrlni_du_q(v2u64 _1, v2i64 _2) {
  return __builtin_lsx_vssrlni_du_q(_1, _2, 1);
}
4396 // CHECK-LABEL: @vssrlrni_b_h(
4397 // CHECK-NEXT: entry:
4398 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrlrni.b.h(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], i32 1)
4399 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vssrlrni_b_h(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vssrlrni_b_h(_1, _2, 1);
}
4404 // CHECK-LABEL: @vssrlrni_h_w(
4405 // CHECK-NEXT: entry:
4406 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrlrni.h.w(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], i32 1)
4407 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vssrlrni_h_w(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vssrlrni_h_w(_1, _2, 1);
}
4412 // CHECK-LABEL: @vssrlrni_w_d(
4413 // CHECK-NEXT: entry:
4414 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrlrni.w.d(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], i32 1)
4415 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vssrlrni_w_d(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vssrlrni_w_d(_1, _2, 1);
}
4420 // CHECK-LABEL: @vssrlrni_d_q(
4421 // CHECK-NEXT: entry:
4422 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrlrni.d.q(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], i32 1)
4423 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vssrlrni_d_q(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vssrlrni_d_q(_1, _2, 1);
}
4428 // CHECK-LABEL: @vssrlrni_bu_h(
4429 // CHECK-NEXT: entry:
4430 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrlrni.bu.h(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], i32 1)
4431 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vssrlrni_bu_h(v16u8 _1, v16i8 _2) {
  return __builtin_lsx_vssrlrni_bu_h(_1, _2, 1);
}
4436 // CHECK-LABEL: @vssrlrni_hu_w(
4437 // CHECK-NEXT: entry:
4438 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrlrni.hu.w(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], i32 1)
4439 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vssrlrni_hu_w(v8u16 _1, v8i16 _2) {
  return __builtin_lsx_vssrlrni_hu_w(_1, _2, 1);
}
4444 // CHECK-LABEL: @vssrlrni_wu_d(
4445 // CHECK-NEXT: entry:
4446 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrlrni.wu.d(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], i32 1)
4447 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vssrlrni_wu_d(v4u32 _1, v4i32 _2) {
  return __builtin_lsx_vssrlrni_wu_d(_1, _2, 1);
}
4452 // CHECK-LABEL: @vssrlrni_du_q(
4453 // CHECK-NEXT: entry:
4454 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrlrni.du.q(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], i32 1)
4455 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vssrlrni_du_q(v2u64 _1, v2i64 _2) {
  return __builtin_lsx_vssrlrni_du_q(_1, _2, 1);
}
4460 // CHECK-LABEL: @vsrani_b_h(
4461 // CHECK-NEXT: entry:
4462 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrani.b.h(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], i32 1)
4463 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsrani_b_h(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vsrani_b_h(_1, _2, 1);
}
4468 // CHECK-LABEL: @vsrani_h_w(
4469 // CHECK-NEXT: entry:
4470 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrani.h.w(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], i32 1)
4471 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsrani_h_w(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vsrani_h_w(_1, _2, 1);
}
4476 // CHECK-LABEL: @vsrani_w_d(
4477 // CHECK-NEXT: entry:
4478 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrani.w.d(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], i32 1)
4479 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsrani_w_d(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vsrani_w_d(_1, _2, 1);
}
4484 // CHECK-LABEL: @vsrani_d_q(
4485 // CHECK-NEXT: entry:
4486 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrani.d.q(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], i32 1)
4487 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsrani_d_q(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vsrani_d_q(_1, _2, 1);
}
4492 // CHECK-LABEL: @vsrarni_b_h(
4493 // CHECK-NEXT: entry:
4494 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vsrarni.b.h(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], i32 1)
4495 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vsrarni_b_h(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vsrarni_b_h(_1, _2, 1);
}
4500 // CHECK-LABEL: @vsrarni_h_w(
4501 // CHECK-NEXT: entry:
4502 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vsrarni.h.w(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], i32 1)
4503 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vsrarni_h_w(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vsrarni_h_w(_1, _2, 1);
}
4508 // CHECK-LABEL: @vsrarni_w_d(
4509 // CHECK-NEXT: entry:
4510 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vsrarni.w.d(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], i32 1)
4511 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vsrarni_w_d(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vsrarni_w_d(_1, _2, 1);
}
4516 // CHECK-LABEL: @vsrarni_d_q(
4517 // CHECK-NEXT: entry:
4518 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vsrarni.d.q(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], i32 1)
4519 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vsrarni_d_q(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vsrarni_d_q(_1, _2, 1);
}
4524 // CHECK-LABEL: @vssrani_b_h(
4525 // CHECK-NEXT: entry:
4526 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrani.b.h(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], i32 1)
4527 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vssrani_b_h(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vssrani_b_h(_1, _2, 1);
}
4532 // CHECK-LABEL: @vssrani_h_w(
4533 // CHECK-NEXT: entry:
4534 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrani.h.w(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], i32 1)
4535 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vssrani_h_w(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vssrani_h_w(_1, _2, 1);
}
4540 // CHECK-LABEL: @vssrani_w_d(
4541 // CHECK-NEXT: entry:
4542 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrani.w.d(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], i32 1)
4543 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vssrani_w_d(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vssrani_w_d(_1, _2, 1);
}
4548 // CHECK-LABEL: @vssrani_d_q(
4549 // CHECK-NEXT: entry:
4550 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrani.d.q(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], i32 1)
4551 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vssrani_d_q(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vssrani_d_q(_1, _2, 1);
}
4556 // CHECK-LABEL: @vssrani_bu_h(
4557 // CHECK-NEXT: entry:
4558 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrani.bu.h(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], i32 1)
4559 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vssrani_bu_h(v16u8 _1, v16i8 _2) {
  return __builtin_lsx_vssrani_bu_h(_1, _2, 1);
}
4564 // CHECK-LABEL: @vssrani_hu_w(
4565 // CHECK-NEXT: entry:
4566 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrani.hu.w(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], i32 1)
4567 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vssrani_hu_w(v8u16 _1, v8i16 _2) {
  return __builtin_lsx_vssrani_hu_w(_1, _2, 1);
}
4572 // CHECK-LABEL: @vssrani_wu_d(
4573 // CHECK-NEXT: entry:
4574 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrani.wu.d(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], i32 1)
4575 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vssrani_wu_d(v4u32 _1, v4i32 _2) {
  return __builtin_lsx_vssrani_wu_d(_1, _2, 1);
}
4580 // CHECK-LABEL: @vssrani_du_q(
4581 // CHECK-NEXT: entry:
4582 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrani.du.q(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], i32 1)
4583 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vssrani_du_q(v2u64 _1, v2i64 _2) {
  return __builtin_lsx_vssrani_du_q(_1, _2, 1);
}
4588 // CHECK-LABEL: @vssrarni_b_h(
4589 // CHECK-NEXT: entry:
4590 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrarni.b.h(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], i32 1)
4591 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vssrarni_b_h(v16i8 _1, v16i8 _2) {
  return __builtin_lsx_vssrarni_b_h(_1, _2, 1);
}
4596 // CHECK-LABEL: @vssrarni_h_w(
4597 // CHECK-NEXT: entry:
4598 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrarni.h.w(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], i32 1)
4599 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vssrarni_h_w(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vssrarni_h_w(_1, _2, 1);
}
4604 // CHECK-LABEL: @vssrarni_w_d(
4605 // CHECK-NEXT: entry:
4606 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrarni.w.d(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], i32 1)
4607 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vssrarni_w_d(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vssrarni_w_d(_1, _2, 1);
}
4612 // CHECK-LABEL: @vssrarni_d_q(
4613 // CHECK-NEXT: entry:
4614 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrarni.d.q(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], i32 1)
4615 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vssrarni_d_q(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vssrarni_d_q(_1, _2, 1);
}
4620 // CHECK-LABEL: @vssrarni_bu_h(
4621 // CHECK-NEXT: entry:
4622 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrarni.bu.h(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], i32 1)
4623 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16u8 vssrarni_bu_h(v16u8 _1, v16i8 _2) {
  return __builtin_lsx_vssrarni_bu_h(_1, _2, 1);
}
4628 // CHECK-LABEL: @vssrarni_hu_w(
4629 // CHECK-NEXT: entry:
4630 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrarni.hu.w(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]], i32 1)
4631 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8u16 vssrarni_hu_w(v8u16 _1, v8i16 _2) {
  return __builtin_lsx_vssrarni_hu_w(_1, _2, 1);
}
4636 // CHECK-LABEL: @vssrarni_wu_d(
4637 // CHECK-NEXT: entry:
4638 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrarni.wu.d(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], i32 1)
4639 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4u32 vssrarni_wu_d(v4u32 _1, v4i32 _2) {
  return __builtin_lsx_vssrarni_wu_d(_1, _2, 1);
}
4644 // CHECK-LABEL: @vssrarni_du_q(
4645 // CHECK-NEXT: entry:
4646 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vssrarni.du.q(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]], i32 1)
4647 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vssrarni_du_q(v2u64 _1, v2i64 _2) {
  return __builtin_lsx_vssrarni_du_q(_1, _2, 1);
}
4652 // CHECK-LABEL: @vpermi_w(
4653 // CHECK-NEXT: entry:
4654 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vpermi.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]], i32 1)
4655 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vpermi_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vpermi_w(_1, _2, 1);
}
4660 // CHECK-LABEL: @vld(
4661 // CHECK-NEXT: entry:
4662 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vld(ptr [[_1:%.*]], i32 1)
4663 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vld(void *_1) { return __builtin_lsx_vld(_1, 1); }
4666 // CHECK-LABEL: @vst(
4667 // CHECK-NEXT: entry:
4668 // CHECK-NEXT: tail call void @llvm.loongarch.lsx.vst(<16 x i8> [[_1:%.*]], ptr [[_2:%.*]], i32 1)
4669 // CHECK-NEXT: ret void
void vst(v16i8 _1, void *_2) { return __builtin_lsx_vst(_1, _2, 1); }
4672 // CHECK-LABEL: @vssrlrn_b_h(
4673 // CHECK-NEXT: entry:
4674 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrlrn.b.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
4675 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vssrlrn_b_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vssrlrn_b_h(_1, _2);
}
4680 // CHECK-LABEL: @vssrlrn_h_w(
4681 // CHECK-NEXT: entry:
4682 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrlrn.h.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
4683 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vssrlrn_h_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vssrlrn_h_w(_1, _2);
}
4688 // CHECK-LABEL: @vssrlrn_w_d(
4689 // CHECK-NEXT: entry:
4690 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrlrn.w.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
4691 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vssrlrn_w_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vssrlrn_w_d(_1, _2);
}
4696 // CHECK-LABEL: @vssrln_b_h(
4697 // CHECK-NEXT: entry:
4698 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vssrln.b.h(<8 x i16> [[_1:%.*]], <8 x i16> [[_2:%.*]])
4699 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vssrln_b_h(v8i16 _1, v8i16 _2) {
  return __builtin_lsx_vssrln_b_h(_1, _2);
}
4704 // CHECK-LABEL: @vssrln_h_w(
4705 // CHECK-NEXT: entry:
4706 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vssrln.h.w(<4 x i32> [[_1:%.*]], <4 x i32> [[_2:%.*]])
4707 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vssrln_h_w(v4i32 _1, v4i32 _2) {
  return __builtin_lsx_vssrln_h_w(_1, _2);
}
4712 // CHECK-LABEL: @vssrln_w_d(
4713 // CHECK-NEXT: entry:
4714 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vssrln.w.d(<2 x i64> [[_1:%.*]], <2 x i64> [[_2:%.*]])
4715 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vssrln_w_d(v2i64 _1, v2i64 _2) {
  return __builtin_lsx_vssrln_w_d(_1, _2);
}
4720 // CHECK-LABEL: @vorn_v(
4721 // CHECK-NEXT: entry:
4722 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vorn.v(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]])
4723 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vorn_v(v16i8 _1, v16i8 _2) { return __builtin_lsx_vorn_v(_1, _2); }
4726 // CHECK-LABEL: @vldi(
4727 // CHECK-NEXT: entry:
4728 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vldi(i32 1)
4729 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vldi() { return __builtin_lsx_vldi(1); }
4732 // CHECK-LABEL: @vshuf_b(
4733 // CHECK-NEXT: entry:
4734 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vshuf.b(<16 x i8> [[_1:%.*]], <16 x i8> [[_2:%.*]], <16 x i8> [[_3:%.*]])
4735 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vshuf_b(v16i8 _1, v16i8 _2, v16i8 _3) {
  return __builtin_lsx_vshuf_b(_1, _2, _3);
}
4740 // CHECK-LABEL: @vldx(
4741 // CHECK-NEXT: entry:
4742 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vldx(ptr [[_1:%.*]], i64 1)
4743 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vldx(void *_1) { return __builtin_lsx_vldx(_1, 1); }
4746 // CHECK-LABEL: @vstx(
4747 // CHECK-NEXT: entry:
4748 // CHECK-NEXT: tail call void @llvm.loongarch.lsx.vstx(<16 x i8> [[_1:%.*]], ptr [[_2:%.*]], i64 1)
4749 // CHECK-NEXT: ret void
void vstx(v16i8 _1, void *_2) { return __builtin_lsx_vstx(_1, _2, 1); }
4752 // CHECK-LABEL: @vextl_qu_du(
4753 // CHECK-NEXT: entry:
4754 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vextl.qu.du(<2 x i64> [[_1:%.*]])
4755 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2u64 vextl_qu_du(v2u64 _1) { return __builtin_lsx_vextl_qu_du(_1); }
4758 // CHECK-LABEL: @bnz_b(
4759 // CHECK-NEXT: entry:
4760 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.bnz.b(<16 x i8> [[_1:%.*]])
4761 // CHECK-NEXT: ret i32 [[TMP0]]
int bnz_b(v16u8 _1) { return __builtin_lsx_bnz_b(_1); }
4764 // CHECK-LABEL: @bnz_d(
4765 // CHECK-NEXT: entry:
4766 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.bnz.d(<2 x i64> [[_1:%.*]])
4767 // CHECK-NEXT: ret i32 [[TMP0]]
int bnz_d(v2u64 _1) { return __builtin_lsx_bnz_d(_1); }
4770 // CHECK-LABEL: @bnz_h(
4771 // CHECK-NEXT: entry:
4772 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.bnz.h(<8 x i16> [[_1:%.*]])
4773 // CHECK-NEXT: ret i32 [[TMP0]]
int bnz_h(v8u16 _1) { return __builtin_lsx_bnz_h(_1); }
4776 // CHECK-LABEL: @bnz_v(
4777 // CHECK-NEXT: entry:
4778 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.bnz.v(<16 x i8> [[_1:%.*]])
4779 // CHECK-NEXT: ret i32 [[TMP0]]
int bnz_v(v16u8 _1) { return __builtin_lsx_bnz_v(_1); }
4782 // CHECK-LABEL: @bnz_w(
4783 // CHECK-NEXT: entry:
4784 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.bnz.w(<4 x i32> [[_1:%.*]])
4785 // CHECK-NEXT: ret i32 [[TMP0]]
int bnz_w(v4u32 _1) { return __builtin_lsx_bnz_w(_1); }
4788 // CHECK-LABEL: @bz_b(
4789 // CHECK-NEXT: entry:
4790 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.bz.b(<16 x i8> [[_1:%.*]])
4791 // CHECK-NEXT: ret i32 [[TMP0]]
int bz_b(v16u8 _1) { return __builtin_lsx_bz_b(_1); }
4794 // CHECK-LABEL: @bz_d(
4795 // CHECK-NEXT: entry:
4796 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.bz.d(<2 x i64> [[_1:%.*]])
4797 // CHECK-NEXT: ret i32 [[TMP0]]
int bz_d(v2u64 _1) { return __builtin_lsx_bz_d(_1); }
4800 // CHECK-LABEL: @bz_h(
4801 // CHECK-NEXT: entry:
4802 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.bz.h(<8 x i16> [[_1:%.*]])
4803 // CHECK-NEXT: ret i32 [[TMP0]]
int bz_h(v8u16 _1) { return __builtin_lsx_bz_h(_1); }
4806 // CHECK-LABEL: @bz_v(
4807 // CHECK-NEXT: entry:
4808 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.bz.v(<16 x i8> [[_1:%.*]])
4809 // CHECK-NEXT: ret i32 [[TMP0]]
int bz_v(v16u8 _1) { return __builtin_lsx_bz_v(_1); }
4812 // CHECK-LABEL: @bz_w(
4813 // CHECK-NEXT: entry:
4814 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lsx.bz.w(<4 x i32> [[_1:%.*]])
4815 // CHECK-NEXT: ret i32 [[TMP0]]
int bz_w(v4u32 _1) { return __builtin_lsx_bz_w(_1); }
4818 // CHECK-LABEL: @vfcmp_caf_d(
4819 // CHECK-NEXT: entry:
4820 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.caf.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
4821 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_caf_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_caf_d(_1, _2);
}
4826 // CHECK-LABEL: @vfcmp_caf_s(
4827 // CHECK-NEXT: entry:
4828 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.caf.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
4829 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_caf_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_caf_s(_1, _2);
}
4834 // CHECK-LABEL: @vfcmp_ceq_d(
4835 // CHECK-NEXT: entry:
4836 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.ceq.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
4837 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_ceq_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_ceq_d(_1, _2);
}
4842 // CHECK-LABEL: @vfcmp_ceq_s(
4843 // CHECK-NEXT: entry:
4844 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.ceq.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
4845 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_ceq_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_ceq_s(_1, _2);
}
4850 // CHECK-LABEL: @vfcmp_cle_d(
4851 // CHECK-NEXT: entry:
4852 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cle.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
4853 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_cle_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_cle_d(_1, _2);
}
4858 // CHECK-LABEL: @vfcmp_cle_s(
4859 // CHECK-NEXT: entry:
4860 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cle.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
4861 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_cle_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_cle_s(_1, _2);
}
4866 // CHECK-LABEL: @vfcmp_clt_d(
4867 // CHECK-NEXT: entry:
4868 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.clt.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
4869 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_clt_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_clt_d(_1, _2);
}
4874 // CHECK-LABEL: @vfcmp_clt_s(
4875 // CHECK-NEXT: entry:
4876 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.clt.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
4877 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_clt_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_clt_s(_1, _2);
}
4882 // CHECK-LABEL: @vfcmp_cne_d(
4883 // CHECK-NEXT: entry:
4884 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cne.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
4885 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_cne_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_cne_d(_1, _2);
}
4890 // CHECK-LABEL: @vfcmp_cne_s(
4891 // CHECK-NEXT: entry:
4892 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cne.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
4893 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_cne_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_cne_s(_1, _2);
}
4898 // CHECK-LABEL: @vfcmp_cor_d(
4899 // CHECK-NEXT: entry:
4900 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cor.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
4901 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_cor_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_cor_d(_1, _2);
}
4906 // CHECK-LABEL: @vfcmp_cor_s(
4907 // CHECK-NEXT: entry:
4908 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cor.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
4909 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_cor_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_cor_s(_1, _2);
}
4914 // CHECK-LABEL: @vfcmp_cueq_d(
4915 // CHECK-NEXT: entry:
4916 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cueq.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
4917 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_cueq_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_cueq_d(_1, _2);
}
4922 // CHECK-LABEL: @vfcmp_cueq_s(
4923 // CHECK-NEXT: entry:
4924 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cueq.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
4925 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_cueq_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_cueq_s(_1, _2);
}
4930 // CHECK-LABEL: @vfcmp_cule_d(
4931 // CHECK-NEXT: entry:
4932 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cule.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
4933 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_cule_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_cule_d(_1, _2);
}
4938 // CHECK-LABEL: @vfcmp_cule_s(
4939 // CHECK-NEXT: entry:
4940 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cule.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
4941 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_cule_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_cule_s(_1, _2);
}
4946 // CHECK-LABEL: @vfcmp_cult_d(
4947 // CHECK-NEXT: entry:
4948 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cult.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
4949 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_cult_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_cult_d(_1, _2);
}
4954 // CHECK-LABEL: @vfcmp_cult_s(
4955 // CHECK-NEXT: entry:
4956 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cult.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
4957 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_cult_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_cult_s(_1, _2);
}
4962 // CHECK-LABEL: @vfcmp_cun_d(
4963 // CHECK-NEXT: entry:
4964 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cun.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
4965 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_cun_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_cun_d(_1, _2);
}
4970 // CHECK-LABEL: @vfcmp_cune_d(
4971 // CHECK-NEXT: entry:
4972 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.cune.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
4973 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_cune_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_cune_d(_1, _2);
}
4978 // CHECK-LABEL: @vfcmp_cune_s(
4979 // CHECK-NEXT: entry:
4980 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cune.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
4981 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_cune_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_cune_s(_1, _2);
}
4986 // CHECK-LABEL: @vfcmp_cun_s(
4987 // CHECK-NEXT: entry:
4988 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.cun.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
4989 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_cun_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_cun_s(_1, _2);
}
4994 // CHECK-LABEL: @vfcmp_saf_d(
4995 // CHECK-NEXT: entry:
4996 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.saf.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
4997 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_saf_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_saf_d(_1, _2);
}
5002 // CHECK-LABEL: @vfcmp_saf_s(
5003 // CHECK-NEXT: entry:
5004 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.saf.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
5005 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_saf_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_saf_s(_1, _2);
}
5010 // CHECK-LABEL: @vfcmp_seq_d(
5011 // CHECK-NEXT: entry:
5012 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.seq.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
5013 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_seq_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_seq_d(_1, _2);
}
5018 // CHECK-LABEL: @vfcmp_seq_s(
5019 // CHECK-NEXT: entry:
5020 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.seq.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
5021 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_seq_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_seq_s(_1, _2);
}
5026 // CHECK-LABEL: @vfcmp_sle_d(
5027 // CHECK-NEXT: entry:
5028 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sle.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
5029 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_sle_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_sle_d(_1, _2);
}
5034 // CHECK-LABEL: @vfcmp_sle_s(
5035 // CHECK-NEXT: entry:
5036 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sle.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
5037 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_sle_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_sle_s(_1, _2);
}
5042 // CHECK-LABEL: @vfcmp_slt_d(
5043 // CHECK-NEXT: entry:
5044 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.slt.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
5045 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_slt_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_slt_d(_1, _2);
}
5050 // CHECK-LABEL: @vfcmp_slt_s(
5051 // CHECK-NEXT: entry:
5052 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.slt.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
5053 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_slt_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_slt_s(_1, _2);
}
5058 // CHECK-LABEL: @vfcmp_sne_d(
5059 // CHECK-NEXT: entry:
5060 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sne.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
5061 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_sne_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_sne_d(_1, _2);
}
5066 // CHECK-LABEL: @vfcmp_sne_s(
5067 // CHECK-NEXT: entry:
5068 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sne.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
5069 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_sne_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_sne_s(_1, _2);
}
5074 // CHECK-LABEL: @vfcmp_sor_d(
5075 // CHECK-NEXT: entry:
5076 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sor.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
5077 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_sor_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_sor_d(_1, _2);
}
5082 // CHECK-LABEL: @vfcmp_sor_s(
5083 // CHECK-NEXT: entry:
5084 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sor.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
5085 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_sor_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_sor_s(_1, _2);
}
5090 // CHECK-LABEL: @vfcmp_sueq_d(
5091 // CHECK-NEXT: entry:
5092 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sueq.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
5093 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_sueq_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_sueq_d(_1, _2);
}
5098 // CHECK-LABEL: @vfcmp_sueq_s(
5099 // CHECK-NEXT: entry:
5100 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sueq.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
5101 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_sueq_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_sueq_s(_1, _2);
}
5106 // CHECK-LABEL: @vfcmp_sule_d(
5107 // CHECK-NEXT: entry:
5108 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sule.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
5109 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_sule_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_sule_d(_1, _2);
}
5114 // CHECK-LABEL: @vfcmp_sule_s(
5115 // CHECK-NEXT: entry:
5116 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sule.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
5117 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_sule_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_sule_s(_1, _2);
}
5122 // CHECK-LABEL: @vfcmp_sult_d(
5123 // CHECK-NEXT: entry:
5124 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sult.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
5125 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_sult_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_sult_d(_1, _2);
}
5130 // CHECK-LABEL: @vfcmp_sult_s(
5131 // CHECK-NEXT: entry:
5132 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sult.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
5133 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_sult_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_sult_s(_1, _2);
}
5138 // CHECK-LABEL: @vfcmp_sun_d(
5139 // CHECK-NEXT: entry:
5140 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sun.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
5141 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_sun_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_sun_d(_1, _2);
}
5146 // CHECK-LABEL: @vfcmp_sune_d(
5147 // CHECK-NEXT: entry:
5148 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vfcmp.sune.d(<2 x double> [[_1:%.*]], <2 x double> [[_2:%.*]])
5149 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vfcmp_sune_d(v2f64 _1, v2f64 _2) {
  return __builtin_lsx_vfcmp_sune_d(_1, _2);
}
5154 // CHECK-LABEL: @vfcmp_sune_s(
5155 // CHECK-NEXT: entry:
5156 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sune.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
5157 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_sune_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_sune_s(_1, _2);
}
5162 // CHECK-LABEL: @vfcmp_sun_s(
5163 // CHECK-NEXT: entry:
5164 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vfcmp.sun.s(<4 x float> [[_1:%.*]], <4 x float> [[_2:%.*]])
5165 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vfcmp_sun_s(v4f32 _1, v4f32 _2) {
  return __builtin_lsx_vfcmp_sun_s(_1, _2);
}
5170 // CHECK-LABEL: @vrepli_b(
5171 // CHECK-NEXT: entry:
5172 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i8> @llvm.loongarch.lsx.vrepli.b(i32 1)
5173 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
v16i8 vrepli_b() { return __builtin_lsx_vrepli_b(1); }
5176 // CHECK-LABEL: @vrepli_d(
5177 // CHECK-NEXT: entry:
5178 // CHECK-NEXT: [[TMP0:%.*]] = tail call <2 x i64> @llvm.loongarch.lsx.vrepli.d(i32 1)
5179 // CHECK-NEXT: ret <2 x i64> [[TMP0]]
v2i64 vrepli_d() { return __builtin_lsx_vrepli_d(1); }
5182 // CHECK-LABEL: @vrepli_h(
5183 // CHECK-NEXT: entry:
5184 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.loongarch.lsx.vrepli.h(i32 1)
5185 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
v8i16 vrepli_h() { return __builtin_lsx_vrepli_h(1); }
5188 // CHECK-LABEL: @vrepli_w(
5189 // CHECK-NEXT: entry:
5190 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.loongarch.lsx.vrepli.w(i32 1)
5191 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
v4i32 vrepli_w() { return __builtin_lsx_vrepli_w(1); }