// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple loongarch64 -target-feature +lasx -O2 -emit-llvm %s -o - | FileCheck %s
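// The CHECK bodies below were produced by the update script named in the NOTE
// above; if codegen for these builtins changes, they can be regenerated by
// re-running llvm/utils/update_cc_test_checks.py on this file
// (clang/test/CodeGen/LoongArch/lasx/builtin.c); the exact invocation may
// depend on the local build setup.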
typedef signed char v32i8 __attribute__((vector_size(32), aligned(32)));
typedef signed char v32i8_b __attribute__((vector_size(32), aligned(1)));
typedef unsigned char v32u8 __attribute__((vector_size(32), aligned(32)));
typedef unsigned char v32u8_b __attribute__((vector_size(32), aligned(1)));
typedef short v16i16 __attribute__((vector_size(32), aligned(32)));
typedef short v16i16_h __attribute__((vector_size(32), aligned(2)));
typedef unsigned short v16u16 __attribute__((vector_size(32), aligned(32)));
typedef unsigned short v16u16_h __attribute__((vector_size(32), aligned(2)));
typedef int v8i32 __attribute__((vector_size(32), aligned(32)));
typedef int v8i32_w __attribute__((vector_size(32), aligned(4)));
typedef unsigned int v8u32 __attribute__((vector_size(32), aligned(32)));
typedef unsigned int v8u32_w __attribute__((vector_size(32), aligned(4)));
typedef long long v4i64 __attribute__((vector_size(32), aligned(32)));
typedef long long v4i64_d __attribute__((vector_size(32), aligned(8)));
typedef unsigned long long v4u64 __attribute__((vector_size(32), aligned(32)));
typedef unsigned long long v4u64_d __attribute__((vector_size(32), aligned(8)));
typedef float v8f32 __attribute__((vector_size(32), aligned(32)));
typedef float v8f32_w __attribute__((vector_size(32), aligned(4)));
typedef double v4f64 __attribute__((vector_size(32), aligned(32)));
typedef double v4f64_d __attribute__((vector_size(32), aligned(8)));
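// Naming follows the usual LASX convention: the suffix gives the lane type
// (b = 8-bit byte, h = 16-bit halfword, w = 32-bit word, d = 64-bit
// doubleword; a trailing u marks unsigned). The *_b/_h/_w/_d typedef
// variants with reduced alignment are presumably intended for tests of
// under-aligned vector accesses; they are unused in this file.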
// CHECK-LABEL: @xvsll_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsll.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvsll_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvsll_b(_1, _2); }
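// Every test below follows the same shape as xvsll_b above: a thin wrapper
// returns the corresponding __builtin_lasx_* call, and the autogenerated
// CHECK lines verify that it lowers to a single @llvm.loongarch.lasx.*
// intrinsic call. A minimal usage sketch outside the test harness
// (hypothetical function name, same builtin and types as in this file):
//
//   v32i8 add_bytes(v32i8 a, v32i8 b) {
//     return __builtin_lasx_xvadd_b(a, b);  // lane-wise 8-bit add
//   }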
// CHECK-LABEL: @xvsll_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsll.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvsll_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsll_h(_1, _2); }
// CHECK-LABEL: @xvsll_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsll.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvsll_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsll_w(_1, _2); }
// CHECK-LABEL: @xvsll_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsll.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvsll_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsll_d(_1, _2); }
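// The *i (immediate) forms below take the shift amount as an integer
// constant expression; the valid range matches the lane width (0-7 for .b,
// 0-15 for .h, 0-31 for .w, 0-63 for .d) and is, to my understanding,
// enforced by clang at compile time, so 1 is used throughout as a safe
// in-range value.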
// CHECK-LABEL: @xvslli_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvslli.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvslli_b(v32i8 _1) { return __builtin_lasx_xvslli_b(_1, 1); }
// CHECK-LABEL: @xvslli_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvslli.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvslli_h(v16i16 _1) { return __builtin_lasx_xvslli_h(_1, 1); }
// CHECK-LABEL: @xvslli_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvslli.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvslli_w(v8i32 _1) { return __builtin_lasx_xvslli_w(_1, 1); }
// CHECK-LABEL: @xvslli_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvslli.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvslli_d(v4i64 _1) { return __builtin_lasx_xvslli_d(_1, 1); }
// CHECK-LABEL: @xvsra_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsra.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvsra_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvsra_b(_1, _2); }
// CHECK-LABEL: @xvsra_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsra.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvsra_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsra_h(_1, _2); }
// CHECK-LABEL: @xvsra_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsra.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvsra_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsra_w(_1, _2); }
// CHECK-LABEL: @xvsra_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsra.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvsra_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsra_d(_1, _2); }
// CHECK-LABEL: @xvsrai_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsrai.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvsrai_b(v32i8 _1) { return __builtin_lasx_xvsrai_b(_1, 1); }
// CHECK-LABEL: @xvsrai_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsrai.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvsrai_h(v16i16 _1) { return __builtin_lasx_xvsrai_h(_1, 1); }
// CHECK-LABEL: @xvsrai_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsrai.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvsrai_w(v8i32 _1) { return __builtin_lasx_xvsrai_w(_1, 1); }
// CHECK-LABEL: @xvsrai_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsrai.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvsrai_d(v4i64 _1) { return __builtin_lasx_xvsrai_d(_1, 1); }
// CHECK-LABEL: @xvsrar_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsrar.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvsrar_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvsrar_b(_1, _2); }
// CHECK-LABEL: @xvsrar_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsrar.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvsrar_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsrar_h(_1, _2); }
// CHECK-LABEL: @xvsrar_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsrar.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvsrar_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsrar_w(_1, _2); }
// CHECK-LABEL: @xvsrar_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsrar.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvsrar_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsrar_d(_1, _2); }
// CHECK-LABEL: @xvsrari_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsrari.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvsrari_b(v32i8 _1) { return __builtin_lasx_xvsrari_b(_1, 1); }
// CHECK-LABEL: @xvsrari_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsrari.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvsrari_h(v16i16 _1) { return __builtin_lasx_xvsrari_h(_1, 1); }
// CHECK-LABEL: @xvsrari_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsrari.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvsrari_w(v8i32 _1) { return __builtin_lasx_xvsrari_w(_1, 1); }
// CHECK-LABEL: @xvsrari_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsrari.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvsrari_d(v4i64 _1) { return __builtin_lasx_xvsrari_d(_1, 1); }
// CHECK-LABEL: @xvsrl_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsrl.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvsrl_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvsrl_b(_1, _2); }
// CHECK-LABEL: @xvsrl_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsrl.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvsrl_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsrl_h(_1, _2); }
// CHECK-LABEL: @xvsrl_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsrl.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvsrl_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsrl_w(_1, _2); }
// CHECK-LABEL: @xvsrl_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsrl.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvsrl_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsrl_d(_1, _2); }
// CHECK-LABEL: @xvsrli_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsrli.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvsrli_b(v32i8 _1) { return __builtin_lasx_xvsrli_b(_1, 1); }
// CHECK-LABEL: @xvsrli_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsrli.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvsrli_h(v16i16 _1) { return __builtin_lasx_xvsrli_h(_1, 1); }
// CHECK-LABEL: @xvsrli_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsrli.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvsrli_w(v8i32 _1) { return __builtin_lasx_xvsrli_w(_1, 1); }
// CHECK-LABEL: @xvsrli_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsrli.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvsrli_d(v4i64 _1) { return __builtin_lasx_xvsrli_d(_1, 1); }
// CHECK-LABEL: @xvsrlr_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsrlr.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvsrlr_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvsrlr_b(_1, _2); }
// CHECK-LABEL: @xvsrlr_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsrlr.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvsrlr_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsrlr_h(_1, _2); }
// CHECK-LABEL: @xvsrlr_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsrlr.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvsrlr_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsrlr_w(_1, _2); }
// CHECK-LABEL: @xvsrlr_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsrlr.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvsrlr_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsrlr_d(_1, _2); }
// CHECK-LABEL: @xvsrlri_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsrlri.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvsrlri_b(v32i8 _1) { return __builtin_lasx_xvsrlri_b(_1, 1); }
// CHECK-LABEL: @xvsrlri_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsrlri.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvsrlri_h(v16i16 _1) { return __builtin_lasx_xvsrlri_h(_1, 1); }
// CHECK-LABEL: @xvsrlri_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsrlri.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvsrlri_w(v8i32 _1) { return __builtin_lasx_xvsrlri_w(_1, 1); }
// CHECK-LABEL: @xvsrlri_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsrlri.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvsrlri_d(v4i64 _1) { return __builtin_lasx_xvsrlri_d(_1, 1); }
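// The xvbitclr/xvbitset/xvbitrev family below operates on unsigned element
// types: each lane clears, sets, or flips the bit selected by the second
// operand (or by the immediate in the *i forms).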
// CHECK-LABEL: @xvbitclr_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvbitclr.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32u8 xvbitclr_b(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvbitclr_b(_1, _2); }
// CHECK-LABEL: @xvbitclr_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvbitclr.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16u16 xvbitclr_h(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvbitclr_h(_1, _2); }
// CHECK-LABEL: @xvbitclr_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvbitclr.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8u32 xvbitclr_w(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvbitclr_w(_1, _2); }
// CHECK-LABEL: @xvbitclr_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvbitclr.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4u64 xvbitclr_d(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvbitclr_d(_1, _2); }
// CHECK-LABEL: @xvbitclri_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvbitclri.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32u8 xvbitclri_b(v32u8 _1) { return __builtin_lasx_xvbitclri_b(_1, 1); }
// CHECK-LABEL: @xvbitclri_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvbitclri.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16u16 xvbitclri_h(v16u16 _1) { return __builtin_lasx_xvbitclri_h(_1, 1); }
// CHECK-LABEL: @xvbitclri_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvbitclri.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8u32 xvbitclri_w(v8u32 _1) { return __builtin_lasx_xvbitclri_w(_1, 1); }
// CHECK-LABEL: @xvbitclri_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvbitclri.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4u64 xvbitclri_d(v4u64 _1) { return __builtin_lasx_xvbitclri_d(_1, 1); }
// CHECK-LABEL: @xvbitset_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvbitset.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32u8 xvbitset_b(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvbitset_b(_1, _2); }
// CHECK-LABEL: @xvbitset_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvbitset.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16u16 xvbitset_h(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvbitset_h(_1, _2); }
// CHECK-LABEL: @xvbitset_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvbitset.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8u32 xvbitset_w(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvbitset_w(_1, _2); }
// CHECK-LABEL: @xvbitset_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvbitset.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4u64 xvbitset_d(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvbitset_d(_1, _2); }
// CHECK-LABEL: @xvbitseti_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvbitseti.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32u8 xvbitseti_b(v32u8 _1) { return __builtin_lasx_xvbitseti_b(_1, 1); }
// CHECK-LABEL: @xvbitseti_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvbitseti.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16u16 xvbitseti_h(v16u16 _1) { return __builtin_lasx_xvbitseti_h(_1, 1); }
// CHECK-LABEL: @xvbitseti_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvbitseti.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8u32 xvbitseti_w(v8u32 _1) { return __builtin_lasx_xvbitseti_w(_1, 1); }
// CHECK-LABEL: @xvbitseti_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvbitseti.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4u64 xvbitseti_d(v4u64 _1) { return __builtin_lasx_xvbitseti_d(_1, 1); }
// CHECK-LABEL: @xvbitrev_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvbitrev.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32u8 xvbitrev_b(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvbitrev_b(_1, _2); }
// CHECK-LABEL: @xvbitrev_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvbitrev.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16u16 xvbitrev_h(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvbitrev_h(_1, _2); }
// CHECK-LABEL: @xvbitrev_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvbitrev.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8u32 xvbitrev_w(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvbitrev_w(_1, _2); }
// CHECK-LABEL: @xvbitrev_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvbitrev.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4u64 xvbitrev_d(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvbitrev_d(_1, _2); }
// CHECK-LABEL: @xvbitrevi_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvbitrevi.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32u8 xvbitrevi_b(v32u8 _1) { return __builtin_lasx_xvbitrevi_b(_1, 1); }
// CHECK-LABEL: @xvbitrevi_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvbitrevi.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16u16 xvbitrevi_h(v16u16 _1) { return __builtin_lasx_xvbitrevi_h(_1, 1); }
// CHECK-LABEL: @xvbitrevi_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvbitrevi.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8u32 xvbitrevi_w(v8u32 _1) { return __builtin_lasx_xvbitrevi_w(_1, 1); }
// CHECK-LABEL: @xvbitrevi_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvbitrevi.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4u64 xvbitrevi_d(v4u64 _1) { return __builtin_lasx_xvbitrevi_d(_1, 1); }
// CHECK-LABEL: @xvadd_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvadd.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvadd_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvadd_b(_1, _2); }
// CHECK-LABEL: @xvadd_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvadd.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvadd_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvadd_h(_1, _2); }
// CHECK-LABEL: @xvadd_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvadd.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvadd_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvadd_w(_1, _2); }
// CHECK-LABEL: @xvadd_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvadd.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvadd_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvadd_d(_1, _2); }
// CHECK-LABEL: @xvaddi_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvaddi.bu(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvaddi_bu(v32i8 _1) { return __builtin_lasx_xvaddi_bu(_1, 1); }
// CHECK-LABEL: @xvaddi_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvaddi.hu(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvaddi_hu(v16i16 _1) { return __builtin_lasx_xvaddi_hu(_1, 1); }
// CHECK-LABEL: @xvaddi_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvaddi.wu(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvaddi_wu(v8i32 _1) { return __builtin_lasx_xvaddi_wu(_1, 1); }
// CHECK-LABEL: @xvaddi_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvaddi.du(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvaddi_du(v4i64 _1) { return __builtin_lasx_xvaddi_du(_1, 1); }
// CHECK-LABEL: @xvsub_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsub.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvsub_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvsub_b(_1, _2); }
// CHECK-LABEL: @xvsub_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsub.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvsub_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsub_h(_1, _2); }
// CHECK-LABEL: @xvsub_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsub.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvsub_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsub_w(_1, _2); }
// CHECK-LABEL: @xvsub_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsub.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvsub_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsub_d(_1, _2); }
// CHECK-LABEL: @xvsubi_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsubi.bu(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvsubi_bu(v32i8 _1) { return __builtin_lasx_xvsubi_bu(_1, 1); }
// CHECK-LABEL: @xvsubi_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsubi.hu(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvsubi_hu(v16i16 _1) { return __builtin_lasx_xvsubi_hu(_1, 1); }
// CHECK-LABEL: @xvsubi_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsubi.wu(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvsubi_wu(v8i32 _1) { return __builtin_lasx_xvsubi_wu(_1, 1); }
// CHECK-LABEL: @xvsubi_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsubi.du(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvsubi_du(v4i64 _1) { return __builtin_lasx_xvsubi_du(_1, 1); }
// CHECK-LABEL: @xvmax_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmax.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvmax_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvmax_b(_1, _2); }
// CHECK-LABEL: @xvmax_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmax.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvmax_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvmax_h(_1, _2); }
// CHECK-LABEL: @xvmax_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmax.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvmax_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvmax_w(_1, _2); }
// CHECK-LABEL: @xvmax_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmax.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvmax_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvmax_d(_1, _2); }
// CHECK-LABEL: @xvmaxi_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmaxi.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvmaxi_b(v32i8 _1) { return __builtin_lasx_xvmaxi_b(_1, 1); }
// CHECK-LABEL: @xvmaxi_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmaxi.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvmaxi_h(v16i16 _1) { return __builtin_lasx_xvmaxi_h(_1, 1); }
// CHECK-LABEL: @xvmaxi_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmaxi.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvmaxi_w(v8i32 _1) { return __builtin_lasx_xvmaxi_w(_1, 1); }
// CHECK-LABEL: @xvmaxi_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmaxi.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvmaxi_d(v4i64 _1) { return __builtin_lasx_xvmaxi_d(_1, 1); }
// CHECK-LABEL: @xvmax_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmax.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32u8 xvmax_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvmax_bu(_1, _2); }
// CHECK-LABEL: @xvmax_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmax.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16u16 xvmax_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvmax_hu(_1, _2); }
// CHECK-LABEL: @xvmax_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmax.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8u32 xvmax_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvmax_wu(_1, _2); }
// CHECK-LABEL: @xvmax_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmax.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4u64 xvmax_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvmax_du(_1, _2); }
// CHECK-LABEL: @xvmaxi_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmaxi.bu(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32u8 xvmaxi_bu(v32u8 _1) { return __builtin_lasx_xvmaxi_bu(_1, 1); }
// CHECK-LABEL: @xvmaxi_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmaxi.hu(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16u16 xvmaxi_hu(v16u16 _1) { return __builtin_lasx_xvmaxi_hu(_1, 1); }
// CHECK-LABEL: @xvmaxi_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmaxi.wu(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8u32 xvmaxi_wu(v8u32 _1) { return __builtin_lasx_xvmaxi_wu(_1, 1); }
// CHECK-LABEL: @xvmaxi_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmaxi.du(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4u64 xvmaxi_du(v4u64 _1) { return __builtin_lasx_xvmaxi_du(_1, 1); }
// CHECK-LABEL: @xvmin_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmin.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvmin_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvmin_b(_1, _2); }
// CHECK-LABEL: @xvmin_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmin.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvmin_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvmin_h(_1, _2); }
// CHECK-LABEL: @xvmin_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmin.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvmin_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvmin_w(_1, _2); }
// CHECK-LABEL: @xvmin_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmin.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvmin_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvmin_d(_1, _2); }
// CHECK-LABEL: @xvmini_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmini.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvmini_b(v32i8 _1) { return __builtin_lasx_xvmini_b(_1, 1); }
// CHECK-LABEL: @xvmini_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmini.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvmini_h(v16i16 _1) { return __builtin_lasx_xvmini_h(_1, 1); }
// CHECK-LABEL: @xvmini_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmini.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvmini_w(v8i32 _1) { return __builtin_lasx_xvmini_w(_1, 1); }
// CHECK-LABEL: @xvmini_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmini.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvmini_d(v4i64 _1) { return __builtin_lasx_xvmini_d(_1, 1); }
// CHECK-LABEL: @xvmin_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmin.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32u8 xvmin_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvmin_bu(_1, _2); }
// CHECK-LABEL: @xvmin_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmin.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16u16 xvmin_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvmin_hu(_1, _2); }
// CHECK-LABEL: @xvmin_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmin.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8u32 xvmin_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvmin_wu(_1, _2); }
// CHECK-LABEL: @xvmin_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmin.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4u64 xvmin_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvmin_du(_1, _2); }
// CHECK-LABEL: @xvmini_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmini.bu(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32u8 xvmini_bu(v32u8 _1) { return __builtin_lasx_xvmini_bu(_1, 1); }
// CHECK-LABEL: @xvmini_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmini.hu(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16u16 xvmini_hu(v16u16 _1) { return __builtin_lasx_xvmini_hu(_1, 1); }
// CHECK-LABEL: @xvmini_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmini.wu(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8u32 xvmini_wu(v8u32 _1) { return __builtin_lasx_xvmini_wu(_1, 1); }
// CHECK-LABEL: @xvmini_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmini.du(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4u64 xvmini_du(v4u64 _1) { return __builtin_lasx_xvmini_du(_1, 1); }
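// The compare builtins below (xvseq/xvslt/xvsle and their immediate and
// unsigned variants) produce a lane mask: each element becomes all ones when
// the predicate holds and zero otherwise, which is why even the unsigned
// comparisons return signed vector types.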
// CHECK-LABEL: @xvseq_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvseq.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvseq_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvseq_b(_1, _2); }
// CHECK-LABEL: @xvseq_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvseq.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvseq_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvseq_h(_1, _2); }
// CHECK-LABEL: @xvseq_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvseq.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvseq_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvseq_w(_1, _2); }
// CHECK-LABEL: @xvseq_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvseq.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvseq_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvseq_d(_1, _2); }
// CHECK-LABEL: @xvseqi_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvseqi.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvseqi_b(v32i8 _1) { return __builtin_lasx_xvseqi_b(_1, 1); }
// CHECK-LABEL: @xvseqi_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvseqi.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvseqi_h(v16i16 _1) { return __builtin_lasx_xvseqi_h(_1, 1); }
// CHECK-LABEL: @xvseqi_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvseqi.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvseqi_w(v8i32 _1) { return __builtin_lasx_xvseqi_w(_1, 1); }
// CHECK-LABEL: @xvseqi_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvseqi.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvseqi_d(v4i64 _1) { return __builtin_lasx_xvseqi_d(_1, 1); }
// CHECK-LABEL: @xvslt_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvslt.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvslt_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvslt_b(_1, _2); }
// CHECK-LABEL: @xvslt_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvslt.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvslt_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvslt_h(_1, _2); }
// CHECK-LABEL: @xvslt_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvslt.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvslt_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvslt_w(_1, _2); }
// CHECK-LABEL: @xvslt_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvslt.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvslt_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvslt_d(_1, _2); }
// CHECK-LABEL: @xvslti_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvslti.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvslti_b(v32i8 _1) { return __builtin_lasx_xvslti_b(_1, 1); }
// CHECK-LABEL: @xvslti_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvslti.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvslti_h(v16i16 _1) { return __builtin_lasx_xvslti_h(_1, 1); }
// CHECK-LABEL: @xvslti_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvslti.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvslti_w(v8i32 _1) { return __builtin_lasx_xvslti_w(_1, 1); }
// CHECK-LABEL: @xvslti_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvslti.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvslti_d(v4i64 _1) { return __builtin_lasx_xvslti_d(_1, 1); }
// CHECK-LABEL: @xvslt_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvslt.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvslt_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvslt_bu(_1, _2); }
// CHECK-LABEL: @xvslt_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvslt.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvslt_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvslt_hu(_1, _2); }
// CHECK-LABEL: @xvslt_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvslt.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvslt_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvslt_wu(_1, _2); }
// CHECK-LABEL: @xvslt_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvslt.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvslt_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvslt_du(_1, _2); }
// CHECK-LABEL: @xvslti_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvslti.bu(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvslti_bu(v32u8 _1) { return __builtin_lasx_xvslti_bu(_1, 1); }
// CHECK-LABEL: @xvslti_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvslti.hu(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvslti_hu(v16u16 _1) { return __builtin_lasx_xvslti_hu(_1, 1); }
// CHECK-LABEL: @xvslti_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvslti.wu(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvslti_wu(v8u32 _1) { return __builtin_lasx_xvslti_wu(_1, 1); }
// CHECK-LABEL: @xvslti_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvslti.du(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvslti_du(v4u64 _1) { return __builtin_lasx_xvslti_du(_1, 1); }
// CHECK-LABEL: @xvsle_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsle.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvsle_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvsle_b(_1, _2); }
// CHECK-LABEL: @xvsle_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsle.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvsle_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsle_h(_1, _2); }
// CHECK-LABEL: @xvsle_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsle.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvsle_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsle_w(_1, _2); }
// CHECK-LABEL: @xvsle_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsle.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvsle_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsle_d(_1, _2); }
// CHECK-LABEL: @xvslei_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvslei.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvslei_b(v32i8 _1) { return __builtin_lasx_xvslei_b(_1, 1); }
// CHECK-LABEL: @xvslei_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvslei.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvslei_h(v16i16 _1) { return __builtin_lasx_xvslei_h(_1, 1); }
// CHECK-LABEL: @xvslei_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvslei.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvslei_w(v8i32 _1) { return __builtin_lasx_xvslei_w(_1, 1); }
// CHECK-LABEL: @xvslei_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvslei.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvslei_d(v4i64 _1) { return __builtin_lasx_xvslei_d(_1, 1); }
// CHECK-LABEL: @xvsle_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsle.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvsle_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvsle_bu(_1, _2); }
// CHECK-LABEL: @xvsle_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsle.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvsle_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvsle_hu(_1, _2); }
// CHECK-LABEL: @xvsle_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsle.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvsle_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvsle_wu(_1, _2); }
// CHECK-LABEL: @xvsle_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsle.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvsle_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvsle_du(_1, _2); }
// CHECK-LABEL: @xvslei_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvslei.bu(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvslei_bu(v32u8 _1) { return __builtin_lasx_xvslei_bu(_1, 1); }
// CHECK-LABEL: @xvslei_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvslei.hu(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvslei_hu(v16u16 _1) { return __builtin_lasx_xvslei_hu(_1, 1); }
// CHECK-LABEL: @xvslei_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvslei.wu(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvslei_wu(v8u32 _1) { return __builtin_lasx_xvslei_wu(_1, 1); }
// CHECK-LABEL: @xvslei_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvslei.du(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvslei_du(v4u64 _1) { return __builtin_lasx_xvslei_du(_1, 1); }
// CHECK-LABEL: @xvsat_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsat.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvsat_b(v32i8 _1) { return __builtin_lasx_xvsat_b(_1, 1); }
// CHECK-LABEL: @xvsat_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsat.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvsat_h(v16i16 _1) { return __builtin_lasx_xvsat_h(_1, 1); }
// CHECK-LABEL: @xvsat_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsat.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvsat_w(v8i32 _1) { return __builtin_lasx_xvsat_w(_1, 1); }
// CHECK-LABEL: @xvsat_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsat.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvsat_d(v4i64 _1) { return __builtin_lasx_xvsat_d(_1, 1); }
// CHECK-LABEL: @xvsat_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsat.bu(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32u8 xvsat_bu(v32u8 _1) { return __builtin_lasx_xvsat_bu(_1, 1); }
// CHECK-LABEL: @xvsat_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsat.hu(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16u16 xvsat_hu(v16u16 _1) { return __builtin_lasx_xvsat_hu(_1, 1); }
// CHECK-LABEL: @xvsat_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsat.wu(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8u32 xvsat_wu(v8u32 _1) { return __builtin_lasx_xvsat_wu(_1, 1); }
// CHECK-LABEL: @xvsat_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsat.du(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4u64 xvsat_du(v4u64 _1) { return __builtin_lasx_xvsat_du(_1, 1); }
// CHECK-LABEL: @xvadda_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvadda.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvadda_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvadda_b(_1, _2); }
// CHECK-LABEL: @xvadda_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvadda.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvadda_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvadda_h(_1, _2); }
// CHECK-LABEL: @xvadda_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvadda.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvadda_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvadda_w(_1, _2); }
// CHECK-LABEL: @xvadda_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvadda.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvadda_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvadda_d(_1, _2); }
// CHECK-LABEL: @xvsadd_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsadd.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32i8 xvsadd_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvsadd_b(_1, _2); }
// CHECK-LABEL: @xvsadd_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsadd.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16i16 xvsadd_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsadd_h(_1, _2); }
// CHECK-LABEL: @xvsadd_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsadd.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8i32 xvsadd_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsadd_w(_1, _2); }
// CHECK-LABEL: @xvsadd_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsadd.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4i64 xvsadd_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsadd_d(_1, _2); }
// CHECK-LABEL: @xvsadd_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsadd.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
v32u8 xvsadd_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvsadd_bu(_1, _2); }
// CHECK-LABEL: @xvsadd_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsadd.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
v16u16 xvsadd_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvsadd_hu(_1, _2); }
// CHECK-LABEL: @xvsadd_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsadd.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
v8u32 xvsadd_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvsadd_wu(_1, _2); }
// CHECK-LABEL: @xvsadd_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsadd.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
v4u64 xvsadd_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvsadd_du(_1, _2); }
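// Per my reading of the LASX instruction semantics, xvavg computes a
// truncating per-lane average ((a + b) >> 1), while the xvavgr forms round
// by adding 1 before the shift ((a + b + 1) >> 1).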
// CHECK-LABEL: @xvavg_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvavg.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvavg_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvavg_b(_1, _2); }
// CHECK-LABEL: @xvavg_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvavg.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvavg_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvavg_h(_1, _2); }
// CHECK-LABEL: @xvavg_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvavg.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvavg_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvavg_w(_1, _2); }
// CHECK-LABEL: @xvavg_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvavg.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvavg_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvavg_d(_1, _2); }
// CHECK-LABEL: @xvavg_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvavg.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvavg_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvavg_bu(_1, _2); }
// CHECK-LABEL: @xvavg_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvavg.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16u16 xvavg_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvavg_hu(_1, _2); }
// CHECK-LABEL: @xvavg_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvavg.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvavg_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvavg_wu(_1, _2); }
// CHECK-LABEL: @xvavg_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvavg.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvavg_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvavg_du(_1, _2); }
// CHECK-LABEL: @xvavgr_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvavgr.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvavgr_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvavgr_b(_1, _2); }
// CHECK-LABEL: @xvavgr_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvavgr.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvavgr_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvavgr_h(_1, _2); }
// CHECK-LABEL: @xvavgr_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvavgr.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvavgr_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvavgr_w(_1, _2); }
// CHECK-LABEL: @xvavgr_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvavgr.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvavgr_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvavgr_d(_1, _2); }
// CHECK-LABEL: @xvavgr_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvavgr.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvavgr_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvavgr_bu(_1, _2); }
// CHECK-LABEL: @xvavgr_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvavgr.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16u16 xvavgr_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvavgr_hu(_1, _2); }
// CHECK-LABEL: @xvavgr_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvavgr.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvavgr_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvavgr_wu(_1, _2); }
// CHECK-LABEL: @xvavgr_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvavgr.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvavgr_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvavgr_du(_1, _2); }
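// A minimal scalar sketch, not part of the autogenerated checks: xvavg.* is
// assumed to compute a truncating lane-wise average and xvavgr.* a rounding
// one. Hypothetical helpers for one signed 8-bit lane; the sum is widened to
// int so it cannot overflow, and >> on a negative int is taken as arithmetic,
// which holds for the compilers this test targets:
static inline signed char avg_b_lane(signed char a, signed char b) {
  return (signed char)(((int)a + (int)b) >> 1);     // truncating average
}
static inline signed char avgr_b_lane(signed char a, signed char b) {
  return (signed char)(((int)a + (int)b + 1) >> 1); // rounding average
}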
// CHECK-LABEL: @xvssub_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssub.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvssub_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvssub_b(_1, _2); }
// CHECK-LABEL: @xvssub_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssub.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvssub_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvssub_h(_1, _2); }
// CHECK-LABEL: @xvssub_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssub.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvssub_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvssub_w(_1, _2); }
// CHECK-LABEL: @xvssub_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvssub.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvssub_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvssub_d(_1, _2); }
// CHECK-LABEL: @xvssub_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssub.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvssub_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvssub_bu(_1, _2); }
// CHECK-LABEL: @xvssub_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssub.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16u16 xvssub_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvssub_hu(_1, _2); }
// CHECK-LABEL: @xvssub_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssub.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvssub_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvssub_wu(_1, _2); }
// CHECK-LABEL: @xvssub_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvssub.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvssub_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvssub_du(_1, _2); }
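// A minimal scalar sketch, not part of the autogenerated checks: the unsigned
// xvssub.*u builtins above are assumed to saturate at zero rather than wrap.
// Hypothetical helper for one unsigned 8-bit lane:
static inline unsigned char ssub_bu_lane(unsigned char a, unsigned char b) {
  return a < b ? 0 : (unsigned char)(a - b); // clamp negative results to 0
}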
// CHECK-LABEL: @xvabsd_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvabsd.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvabsd_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvabsd_b(_1, _2); }
// CHECK-LABEL: @xvabsd_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvabsd.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvabsd_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvabsd_h(_1, _2); }
// CHECK-LABEL: @xvabsd_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvabsd.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvabsd_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvabsd_w(_1, _2); }
// CHECK-LABEL: @xvabsd_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvabsd.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvabsd_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvabsd_d(_1, _2); }
// CHECK-LABEL: @xvabsd_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvabsd.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvabsd_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvabsd_bu(_1, _2); }
// CHECK-LABEL: @xvabsd_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvabsd.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16u16 xvabsd_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvabsd_hu(_1, _2); }
// CHECK-LABEL: @xvabsd_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvabsd.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvabsd_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvabsd_wu(_1, _2); }
// CHECK-LABEL: @xvabsd_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvabsd.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvabsd_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvabsd_du(_1, _2); }
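// A minimal scalar sketch, not part of the autogenerated checks: xvabsd.* is
// assumed to compute the lane-wise absolute difference |a - b|; the unsigned
// forms amount to max(a, b) - min(a, b). Hypothetical helper for one signed
// 32-bit lane, widened so the INT_MIN case cannot overflow:
static inline int absd_w_lane(int a, int b) {
  long long d = (long long)a - (long long)b;
  return (int)(d < 0 ? -d : d);
}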
// CHECK-LABEL: @xvmul_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmul.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvmul_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvmul_b(_1, _2); }
// CHECK-LABEL: @xvmul_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmul.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvmul_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvmul_h(_1, _2); }
// CHECK-LABEL: @xvmul_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmul.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvmul_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvmul_w(_1, _2); }
// CHECK-LABEL: @xvmul_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmul.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmul_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvmul_d(_1, _2); }
// CHECK-LABEL: @xvmadd_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmadd.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], <32 x i8> [[_3:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvmadd_b(v32i8 _1, v32i8 _2, v32i8 _3) { return __builtin_lasx_xvmadd_b(_1, _2, _3); }
// CHECK-LABEL: @xvmadd_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmadd.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], <16 x i16> [[_3:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvmadd_h(v16i16 _1, v16i16 _2, v16i16 _3) { return __builtin_lasx_xvmadd_h(_1, _2, _3); }
// CHECK-LABEL: @xvmadd_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmadd.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], <8 x i32> [[_3:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvmadd_w(v8i32 _1, v8i32 _2, v8i32 _3) { return __builtin_lasx_xvmadd_w(_1, _2, _3); }
// CHECK-LABEL: @xvmadd_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmadd.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], <4 x i64> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmadd_d(v4i64 _1, v4i64 _2, v4i64 _3) { return __builtin_lasx_xvmadd_d(_1, _2, _3); }
// CHECK-LABEL: @xvmsub_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmsub.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], <32 x i8> [[_3:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvmsub_b(v32i8 _1, v32i8 _2, v32i8 _3) { return __builtin_lasx_xvmsub_b(_1, _2, _3); }
// CHECK-LABEL: @xvmsub_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmsub.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], <16 x i16> [[_3:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvmsub_h(v16i16 _1, v16i16 _2, v16i16 _3) { return __builtin_lasx_xvmsub_h(_1, _2, _3); }
// CHECK-LABEL: @xvmsub_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmsub.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], <8 x i32> [[_3:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvmsub_w(v8i32 _1, v8i32 _2, v8i32 _3) { return __builtin_lasx_xvmsub_w(_1, _2, _3); }
// CHECK-LABEL: @xvmsub_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmsub.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], <4 x i64> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmsub_d(v4i64 _1, v4i64 _2, v4i64 _3) { return __builtin_lasx_xvmsub_d(_1, _2, _3); }
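// A minimal scalar sketch, not part of the autogenerated checks: xvmadd.* and
// xvmsub.* are assumed to accumulate into the first operand lane-wise, that
// is, _1 + _2 * _3 and _1 - _2 * _3 with ordinary wrapping arithmetic.
// Hypothetical helpers for one 32-bit lane; wrapping is modelled through
// unsigned arithmetic so the C sketch has no undefined overflow:
static inline int madd_w_lane(int acc, int a, int b) {
  return (int)((unsigned)acc + (unsigned)a * (unsigned)b); // wraps mod 2^32
}
static inline int msub_w_lane(int acc, int a, int b) {
  return (int)((unsigned)acc - (unsigned)a * (unsigned)b);
}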
// CHECK-LABEL: @xvdiv_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvdiv.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvdiv_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvdiv_b(_1, _2); }
// CHECK-LABEL: @xvdiv_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvdiv.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvdiv_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvdiv_h(_1, _2); }
// CHECK-LABEL: @xvdiv_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvdiv.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvdiv_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvdiv_w(_1, _2); }
// CHECK-LABEL: @xvdiv_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvdiv.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvdiv_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvdiv_d(_1, _2); }
// CHECK-LABEL: @xvdiv_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvdiv.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvdiv_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvdiv_bu(_1, _2); }
// CHECK-LABEL: @xvdiv_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvdiv.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16u16 xvdiv_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvdiv_hu(_1, _2); }
// CHECK-LABEL: @xvdiv_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvdiv.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvdiv_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvdiv_wu(_1, _2); }
// CHECK-LABEL: @xvdiv_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvdiv.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvdiv_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvdiv_du(_1, _2); }
// CHECK-LABEL: @xvhaddw_h_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvhaddw.h.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvhaddw_h_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvhaddw_h_b(_1, _2); }
// CHECK-LABEL: @xvhaddw_w_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvhaddw.w.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvhaddw_w_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvhaddw_w_h(_1, _2); }
// CHECK-LABEL: @xvhaddw_d_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvhaddw.d.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvhaddw_d_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvhaddw_d_w(_1, _2); }
// CHECK-LABEL: @xvhaddw_hu_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvhaddw.hu.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16u16 xvhaddw_hu_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvhaddw_hu_bu(_1, _2); }
// CHECK-LABEL: @xvhaddw_wu_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvhaddw.wu.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvhaddw_wu_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvhaddw_wu_hu(_1, _2); }
// CHECK-LABEL: @xvhaddw_du_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvhaddw.du.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvhaddw_du_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvhaddw_du_wu(_1, _2); }
// CHECK-LABEL: @xvhsubw_h_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvhsubw.h.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvhsubw_h_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvhsubw_h_b(_1, _2); }
// CHECK-LABEL: @xvhsubw_w_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvhsubw.w.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvhsubw_w_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvhsubw_w_h(_1, _2); }
// CHECK-LABEL: @xvhsubw_d_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvhsubw.d.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvhsubw_d_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvhsubw_d_w(_1, _2); }
// CHECK-LABEL: @xvhsubw_hu_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvhsubw.hu.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvhsubw_hu_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvhsubw_hu_bu(_1, _2); }
// CHECK-LABEL: @xvhsubw_wu_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvhsubw.wu.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvhsubw_wu_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvhsubw_wu_hu(_1, _2); }
// CHECK-LABEL: @xvhsubw_du_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvhsubw.du.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvhsubw_du_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvhsubw_du_wu(_1, _2); }
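// A minimal scalar sketch, not part of the autogenerated checks: the
// horizontal widening forms above are assumed to pair adjacent lanes, taking
// the odd-indexed lane of the first source and the even-indexed lane of the
// second, widening both, then adding (xvhaddw) or subtracting (xvhsubw); the
// widened result type is why a subtraction of unsigned sources can return a
// signed vector. Hypothetical helper for one result lane of xvhaddw.h.b:
static inline short haddw_h_b_lane(signed char odd_of_1, signed char even_of_2) {
  return (short)((short)odd_of_1 + (short)even_of_2); // widen, then add
}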
// CHECK-LABEL: @xvmod_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmod.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvmod_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvmod_b(_1, _2); }
// CHECK-LABEL: @xvmod_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmod.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvmod_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvmod_h(_1, _2); }
// CHECK-LABEL: @xvmod_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmod.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvmod_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvmod_w(_1, _2); }
// CHECK-LABEL: @xvmod_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmod.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmod_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvmod_d(_1, _2); }
// CHECK-LABEL: @xvmod_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmod.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvmod_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvmod_bu(_1, _2); }
// CHECK-LABEL: @xvmod_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmod.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16u16 xvmod_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvmod_hu(_1, _2); }
// CHECK-LABEL: @xvmod_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmod.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvmod_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvmod_wu(_1, _2); }
// CHECK-LABEL: @xvmod_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmod.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvmod_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvmod_du(_1, _2); }
// CHECK-LABEL: @xvrepl128vei_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvrepl128vei.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvrepl128vei_b(v32i8 _1) { return __builtin_lasx_xvrepl128vei_b(_1, 1); }
// CHECK-LABEL: @xvrepl128vei_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvrepl128vei.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvrepl128vei_h(v16i16 _1) { return __builtin_lasx_xvrepl128vei_h(_1, 1); }
// CHECK-LABEL: @xvrepl128vei_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvrepl128vei.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvrepl128vei_w(v8i32 _1) { return __builtin_lasx_xvrepl128vei_w(_1, 1); }
// CHECK-LABEL: @xvrepl128vei_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvrepl128vei.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvrepl128vei_d(v4i64 _1) { return __builtin_lasx_xvrepl128vei_d(_1, 1); }
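// A minimal scalar sketch, not part of the autogenerated checks:
// xvrepl128vei.* is assumed to broadcast the immediate-selected element
// within each 128-bit half independently. Hypothetical model over a plain
// array standing in for one register of 32-bit elements, idx in [0, 3]:
static inline void repl128vei_w_model(int dst[8], const int src[8], int idx) {
  for (int i = 0; i < 4; ++i) dst[i] = src[idx];     // low 128-bit half
  for (int i = 4; i < 8; ++i) dst[i] = src[4 + idx]; // high 128-bit half
}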
// CHECK-LABEL: @xvpickev_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvpickev.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvpickev_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvpickev_b(_1, _2); }
// CHECK-LABEL: @xvpickev_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvpickev.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvpickev_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvpickev_h(_1, _2); }
// CHECK-LABEL: @xvpickev_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvpickev.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvpickev_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvpickev_w(_1, _2); }
// CHECK-LABEL: @xvpickev_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvpickev.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvpickev_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvpickev_d(_1, _2); }
// CHECK-LABEL: @xvpickod_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvpickod.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvpickod_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvpickod_b(_1, _2); }
// CHECK-LABEL: @xvpickod_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvpickod.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvpickod_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvpickod_h(_1, _2); }
// CHECK-LABEL: @xvpickod_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvpickod.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvpickod_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvpickod_w(_1, _2); }
// CHECK-LABEL: @xvpickod_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvpickod.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvpickod_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvpickod_d(_1, _2); }
// CHECK-LABEL: @xvilvh_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvilvh.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvilvh_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvilvh_b(_1, _2); }
// CHECK-LABEL: @xvilvh_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvilvh.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvilvh_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvilvh_h(_1, _2); }
// CHECK-LABEL: @xvilvh_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvilvh.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvilvh_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvilvh_w(_1, _2); }
// CHECK-LABEL: @xvilvh_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvilvh.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvilvh_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvilvh_d(_1, _2); }
// CHECK-LABEL: @xvilvl_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvilvl.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvilvl_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvilvl_b(_1, _2); }
// CHECK-LABEL: @xvilvl_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvilvl.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvilvl_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvilvl_h(_1, _2); }
// CHECK-LABEL: @xvilvl_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvilvl.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvilvl_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvilvl_w(_1, _2); }
// CHECK-LABEL: @xvilvl_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvilvl.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvilvl_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvilvl_d(_1, _2); }
// CHECK-LABEL: @xvpackev_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvpackev.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvpackev_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvpackev_b(_1, _2); }
// CHECK-LABEL: @xvpackev_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvpackev.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvpackev_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvpackev_h(_1, _2); }
// CHECK-LABEL: @xvpackev_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvpackev.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvpackev_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvpackev_w(_1, _2); }
// CHECK-LABEL: @xvpackev_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvpackev.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvpackev_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvpackev_d(_1, _2); }
// CHECK-LABEL: @xvpackod_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvpackod.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvpackod_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvpackod_b(_1, _2); }
// CHECK-LABEL: @xvpackod_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvpackod.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvpackod_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvpackod_h(_1, _2); }
// CHECK-LABEL: @xvpackod_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvpackod.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvpackod_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvpackod_w(_1, _2); }
// CHECK-LABEL: @xvpackod_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvpackod.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvpackod_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvpackod_d(_1, _2); }
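// A minimal scalar sketch, not part of the autogenerated checks: the
// pick/pack/interleave family above is assumed to act independently on each
// 128-bit half. Roughly, pickev keeps elements 0, 2, 4, ... of each source
// and pickod keeps 1, 3, 5, ...; packev and packod alternate the even (resp.
// odd) elements of the two sources; ilvl and ilvh interleave the low (resp.
// high) elements. Hypothetical model of xvpackev.w on one 128-bit half, with
// a standing in for _1 and b for _2:
static inline void packev_w_half(int dst[4], const int a[4], const int b[4]) {
  dst[0] = b[0]; // even elements of the two sources, alternating
  dst[1] = a[0];
  dst[2] = b[2];
  dst[3] = a[2];
}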
// CHECK-LABEL: @xvshuf_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvshuf.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], <32 x i8> [[_3:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvshuf_b(v32i8 _1, v32i8 _2, v32i8 _3) { return __builtin_lasx_xvshuf_b(_1, _2, _3); }
// CHECK-LABEL: @xvshuf_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvshuf.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], <16 x i16> [[_3:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvshuf_h(v16i16 _1, v16i16 _2, v16i16 _3) { return __builtin_lasx_xvshuf_h(_1, _2, _3); }
// CHECK-LABEL: @xvshuf_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvshuf.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], <8 x i32> [[_3:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvshuf_w(v8i32 _1, v8i32 _2, v8i32 _3) { return __builtin_lasx_xvshuf_w(_1, _2, _3); }
// CHECK-LABEL: @xvshuf_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvshuf.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], <4 x i64> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvshuf_d(v4i64 _1, v4i64 _2, v4i64 _3) { return __builtin_lasx_xvshuf_d(_1, _2, _3); }
// CHECK-LABEL: @xvand_v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvand.v(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvand_v(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvand_v(_1, _2); }
// CHECK-LABEL: @xvandi_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvandi.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvandi_b(v32u8 _1) { return __builtin_lasx_xvandi_b(_1, 1); }
// CHECK-LABEL: @xvor_v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvor.v(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvor_v(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvor_v(_1, _2); }
// CHECK-LABEL: @xvori_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvori.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvori_b(v32u8 _1) { return __builtin_lasx_xvori_b(_1, 1); }
// CHECK-LABEL: @xvnor_v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvnor.v(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvnor_v(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvnor_v(_1, _2); }
// CHECK-LABEL: @xvnori_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvnori.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvnori_b(v32u8 _1) { return __builtin_lasx_xvnori_b(_1, 1); }
// CHECK-LABEL: @xvxor_v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvxor.v(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvxor_v(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvxor_v(_1, _2); }
// CHECK-LABEL: @xvxori_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvxori.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvxori_b(v32u8 _1) { return __builtin_lasx_xvxori_b(_1, 1); }
// CHECK-LABEL: @xvbitsel_v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvbitsel.v(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], <32 x i8> [[_3:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvbitsel_v(v32u8 _1, v32u8 _2, v32u8 _3) { return __builtin_lasx_xvbitsel_v(_1, _2, _3); }
// CHECK-LABEL: @xvbitseli_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvbitseli.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvbitseli_b(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvbitseli_b(_1, _2, 1); }
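// A minimal scalar sketch, not part of the autogenerated checks: xvbitsel.v
// is assumed to take each destination bit from _2 where the corresponding bit
// of the mask _3 is 1, and from _1 where it is 0. Hypothetical helper for one
// byte lane:
static inline unsigned char bitsel_lane(unsigned char a, unsigned char b, unsigned char m) {
  return (unsigned char)((a & ~m) | (b & m)); // m selects bits of b over a
}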
// CHECK-LABEL: @xvshuf4i_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvshuf4i.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvshuf4i_b(v32i8 _1) { return __builtin_lasx_xvshuf4i_b(_1, 1); }
// CHECK-LABEL: @xvshuf4i_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvshuf4i.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvshuf4i_h(v16i16 _1) { return __builtin_lasx_xvshuf4i_h(_1, 1); }
// CHECK-LABEL: @xvshuf4i_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvshuf4i.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvshuf4i_w(v8i32 _1) { return __builtin_lasx_xvshuf4i_w(_1, 1); }
// CHECK-LABEL: @xvreplgr2vr_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvreplgr2vr.b(i32 [[_1:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvreplgr2vr_b(int _1) { return __builtin_lasx_xvreplgr2vr_b(_1); }
// CHECK-LABEL: @xvreplgr2vr_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvreplgr2vr.h(i32 [[_1:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvreplgr2vr_h(int _1) { return __builtin_lasx_xvreplgr2vr_h(_1); }
// CHECK-LABEL: @xvreplgr2vr_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvreplgr2vr.w(i32 [[_1:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvreplgr2vr_w(int _1) { return __builtin_lasx_xvreplgr2vr_w(_1); }
// CHECK-LABEL: @xvreplgr2vr_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[_1:%.*]] to i64
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvreplgr2vr.d(i64 [[CONV]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvreplgr2vr_d(int _1) { return __builtin_lasx_xvreplgr2vr_d(_1); }
// CHECK-LABEL: @xvpcnt_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvpcnt.b(<32 x i8> [[_1:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvpcnt_b(v32i8 _1) { return __builtin_lasx_xvpcnt_b(_1); }
// CHECK-LABEL: @xvpcnt_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvpcnt.h(<16 x i16> [[_1:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvpcnt_h(v16i16 _1) { return __builtin_lasx_xvpcnt_h(_1); }
// CHECK-LABEL: @xvpcnt_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvpcnt.w(<8 x i32> [[_1:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvpcnt_w(v8i32 _1) { return __builtin_lasx_xvpcnt_w(_1); }
// CHECK-LABEL: @xvpcnt_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvpcnt.d(<4 x i64> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvpcnt_d(v4i64 _1) { return __builtin_lasx_xvpcnt_d(_1); }
// CHECK-LABEL: @xvclo_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvclo.b(<32 x i8> [[_1:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvclo_b(v32i8 _1) { return __builtin_lasx_xvclo_b(_1); }
// CHECK-LABEL: @xvclo_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvclo.h(<16 x i16> [[_1:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvclo_h(v16i16 _1) { return __builtin_lasx_xvclo_h(_1); }
// CHECK-LABEL: @xvclo_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvclo.w(<8 x i32> [[_1:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvclo_w(v8i32 _1) { return __builtin_lasx_xvclo_w(_1); }
// CHECK-LABEL: @xvclo_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvclo.d(<4 x i64> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvclo_d(v4i64 _1) { return __builtin_lasx_xvclo_d(_1); }
// CHECK-LABEL: @xvclz_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvclz.b(<32 x i8> [[_1:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvclz_b(v32i8 _1) { return __builtin_lasx_xvclz_b(_1); }
// CHECK-LABEL: @xvclz_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvclz.h(<16 x i16> [[_1:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvclz_h(v16i16 _1) { return __builtin_lasx_xvclz_h(_1); }
// CHECK-LABEL: @xvclz_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvclz.w(<8 x i32> [[_1:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvclz_w(v8i32 _1) { return __builtin_lasx_xvclz_w(_1); }
// CHECK-LABEL: @xvclz_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvclz.d(<4 x i64> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvclz_d(v4i64 _1) { return __builtin_lasx_xvclz_d(_1); }
// CHECK-LABEL: @xvfadd_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfadd.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvfadd_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfadd_s(_1, _2); }
// CHECK-LABEL: @xvfadd_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfadd.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvfadd_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfadd_d(_1, _2); }
// CHECK-LABEL: @xvfsub_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfsub.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvfsub_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfsub_s(_1, _2); }
// CHECK-LABEL: @xvfsub_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfsub.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvfsub_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfsub_d(_1, _2); }
// CHECK-LABEL: @xvfmul_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfmul.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvfmul_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfmul_s(_1, _2); }
// CHECK-LABEL: @xvfmul_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfmul.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvfmul_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfmul_d(_1, _2); }
// CHECK-LABEL: @xvfdiv_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfdiv.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvfdiv_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfdiv_s(_1, _2); }
// CHECK-LABEL: @xvfdiv_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfdiv.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvfdiv_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfdiv_d(_1, _2); }
// CHECK-LABEL: @xvfcvt_h_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvfcvt.h.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvfcvt_h_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcvt_h_s(_1, _2); }
// CHECK-LABEL: @xvfcvt_s_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfcvt.s.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvfcvt_s_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcvt_s_d(_1, _2); }
// CHECK-LABEL: @xvfmin_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfmin.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvfmin_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfmin_s(_1, _2); }
// CHECK-LABEL: @xvfmin_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfmin.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvfmin_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfmin_d(_1, _2); }
// CHECK-LABEL: @xvfmina_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfmina.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvfmina_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfmina_s(_1, _2); }
// CHECK-LABEL: @xvfmina_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfmina.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvfmina_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfmina_d(_1, _2); }
// CHECK-LABEL: @xvfmax_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfmax.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvfmax_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfmax_s(_1, _2); }
// CHECK-LABEL: @xvfmax_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfmax.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvfmax_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfmax_d(_1, _2); }
// CHECK-LABEL: @xvfmaxa_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfmaxa.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvfmaxa_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfmaxa_s(_1, _2); }
// CHECK-LABEL: @xvfmaxa_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfmaxa.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvfmaxa_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfmaxa_d(_1, _2); }
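// A minimal scalar sketch, not part of the autogenerated checks: xvfmaxa.*
// and xvfmina.* are assumed to select the operand with the larger (resp.
// smaller) magnitude, unlike xvfmax/xvfmin, which compare signed values; NaN
// handling is left aside here. Hypothetical helper for one single-precision
// lane, avoiding <math.h> since cc1 tests have no header search path:
static inline float fmaxa_s_lane(float a, float b) {
  float aa = a < 0.0f ? -a : a; // |a|
  float ab = b < 0.0f ? -b : b; // |b|
  return aa < ab ? b : a;
}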
// CHECK-LABEL: @xvfclass_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfclass.s(<8 x float> [[_1:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfclass_s(v8f32 _1) { return __builtin_lasx_xvfclass_s(_1); }
// CHECK-LABEL: @xvfclass_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfclass.d(<4 x double> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfclass_d(v4f64 _1) { return __builtin_lasx_xvfclass_d(_1); }
// CHECK-LABEL: @xvfsqrt_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfsqrt.s(<8 x float> [[_1:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvfsqrt_s(v8f32 _1) { return __builtin_lasx_xvfsqrt_s(_1); }
// CHECK-LABEL: @xvfsqrt_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfsqrt.d(<4 x double> [[_1:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvfsqrt_d(v4f64 _1) { return __builtin_lasx_xvfsqrt_d(_1); }
// CHECK-LABEL: @xvfrecip_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfrecip.s(<8 x float> [[_1:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvfrecip_s(v8f32 _1) { return __builtin_lasx_xvfrecip_s(_1); }
// CHECK-LABEL: @xvfrecip_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfrecip.d(<4 x double> [[_1:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvfrecip_d(v4f64 _1) { return __builtin_lasx_xvfrecip_d(_1); }
// CHECK-LABEL: @xvfrint_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfrint.s(<8 x float> [[_1:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvfrint_s(v8f32 _1) { return __builtin_lasx_xvfrint_s(_1); }
// CHECK-LABEL: @xvfrint_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfrint.d(<4 x double> [[_1:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvfrint_d(v4f64 _1) { return __builtin_lasx_xvfrint_d(_1); }
// CHECK-LABEL: @xvfrsqrt_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfrsqrt.s(<8 x float> [[_1:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvfrsqrt_s(v8f32 _1) { return __builtin_lasx_xvfrsqrt_s(_1); }
// CHECK-LABEL: @xvfrsqrt_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfrsqrt.d(<4 x double> [[_1:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvfrsqrt_d(v4f64 _1) { return __builtin_lasx_xvfrsqrt_d(_1); }
// CHECK-LABEL: @xvflogb_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvflogb.s(<8 x float> [[_1:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvflogb_s(v8f32 _1) { return __builtin_lasx_xvflogb_s(_1); }
// CHECK-LABEL: @xvflogb_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvflogb.d(<4 x double> [[_1:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvflogb_d(v4f64 _1) { return __builtin_lasx_xvflogb_d(_1); }
// CHECK-LABEL: @xvfcvth_s_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfcvth.s.h(<16 x i16> [[_1:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvfcvth_s_h(v16i16 _1) { return __builtin_lasx_xvfcvth_s_h(_1); }
// CHECK-LABEL: @xvfcvth_d_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfcvth.d.s(<8 x float> [[_1:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvfcvth_d_s(v8f32 _1) { return __builtin_lasx_xvfcvth_d_s(_1); }
// CHECK-LABEL: @xvfcvtl_s_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfcvtl.s.h(<16 x i16> [[_1:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvfcvtl_s_h(v16i16 _1) { return __builtin_lasx_xvfcvtl_s_h(_1); }
// CHECK-LABEL: @xvfcvtl_d_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfcvtl.d.s(<8 x float> [[_1:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvfcvtl_d_s(v8f32 _1) { return __builtin_lasx_xvfcvtl_d_s(_1); }
// CHECK-LABEL: @xvftint_w_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvftint.w.s(<8 x float> [[_1:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvftint_w_s(v8f32 _1) { return __builtin_lasx_xvftint_w_s(_1); }
// CHECK-LABEL: @xvftint_l_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftint.l.d(<4 x double> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvftint_l_d(v4f64 _1) { return __builtin_lasx_xvftint_l_d(_1); }
// CHECK-LABEL: @xvftint_wu_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvftint.wu.s(<8 x float> [[_1:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvftint_wu_s(v8f32 _1) { return __builtin_lasx_xvftint_wu_s(_1); }
// CHECK-LABEL: @xvftint_lu_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftint.lu.d(<4 x double> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvftint_lu_d(v4f64 _1) { return __builtin_lasx_xvftint_lu_d(_1); }
// CHECK-LABEL: @xvftintrz_w_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvftintrz.w.s(<8 x float> [[_1:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvftintrz_w_s(v8f32 _1) { return __builtin_lasx_xvftintrz_w_s(_1); }
// CHECK-LABEL: @xvftintrz_l_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftintrz.l.d(<4 x double> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvftintrz_l_d(v4f64 _1) { return __builtin_lasx_xvftintrz_l_d(_1); }
// CHECK-LABEL: @xvftintrz_wu_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvftintrz.wu.s(<8 x float> [[_1:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvftintrz_wu_s(v8f32 _1) { return __builtin_lasx_xvftintrz_wu_s(_1); }
// CHECK-LABEL: @xvftintrz_lu_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftintrz.lu.d(<4 x double> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvftintrz_lu_d(v4f64 _1) { return __builtin_lasx_xvftintrz_lu_d(_1); }
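// A minimal scalar sketch, not part of the autogenerated checks: the rz
// suffix on xvftintrz.* is assumed to mean round toward zero, the same
// truncation a C cast performs, while plain xvftint.* rounds to nearest;
// out-of-range inputs and NaNs are left aside. Hypothetical helper for one
// lane:
static inline int ftintrz_w_s_lane(float f) {
  return (int)f; // C float-to-int conversion truncates toward zero
}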
// CHECK-LABEL: @xvffint_s_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvffint.s.w(<8 x i32> [[_1:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvffint_s_w(v8i32 _1) { return __builtin_lasx_xvffint_s_w(_1); }
// CHECK-LABEL: @xvffint_d_l(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvffint.d.l(<4 x i64> [[_1:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvffint_d_l(v4i64 _1) { return __builtin_lasx_xvffint_d_l(_1); }
// CHECK-LABEL: @xvffint_s_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvffint.s.wu(<8 x i32> [[_1:%.*]])
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvffint_s_wu(v8u32 _1) { return __builtin_lasx_xvffint_s_wu(_1); }
// CHECK-LABEL: @xvffint_d_lu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvffint.d.lu(<4 x i64> [[_1:%.*]])
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvffint_d_lu(v4u64 _1) { return __builtin_lasx_xvffint_d_lu(_1); }
// CHECK-LABEL: @xvreplve_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvreplve.b(<32 x i8> [[_1:%.*]], i32 [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvreplve_b(v32i8 _1, int _2) { return __builtin_lasx_xvreplve_b(_1, _2); }
// CHECK-LABEL: @xvreplve_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvreplve.h(<16 x i16> [[_1:%.*]], i32 [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvreplve_h(v16i16 _1, int _2) { return __builtin_lasx_xvreplve_h(_1, _2); }
// CHECK-LABEL: @xvreplve_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvreplve.w(<8 x i32> [[_1:%.*]], i32 [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvreplve_w(v8i32 _1, int _2) { return __builtin_lasx_xvreplve_w(_1, _2); }
// CHECK-LABEL: @xvreplve_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvreplve.d(<4 x i64> [[_1:%.*]], i32 [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvreplve_d(v4i64 _1, int _2) { return __builtin_lasx_xvreplve_d(_1, _2); }
// CHECK-LABEL: @xvpermi_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvpermi.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvpermi_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvpermi_w(_1, _2, 1); }
// CHECK-LABEL: @xvandn_v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvandn.v(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvandn_v(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvandn_v(_1, _2); }
// CHECK-LABEL: @xvneg_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvneg.b(<32 x i8> [[_1:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvneg_b(v32i8 _1) { return __builtin_lasx_xvneg_b(_1); }
// CHECK-LABEL: @xvneg_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvneg.h(<16 x i16> [[_1:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvneg_h(v16i16 _1) { return __builtin_lasx_xvneg_h(_1); }
// CHECK-LABEL: @xvneg_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvneg.w(<8 x i32> [[_1:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvneg_w(v8i32 _1) { return __builtin_lasx_xvneg_w(_1); }
// CHECK-LABEL: @xvneg_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvneg.d(<4 x i64> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvneg_d(v4i64 _1) { return __builtin_lasx_xvneg_d(_1); }
// CHECK-LABEL: @xvmuh_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmuh.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvmuh_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvmuh_b(_1, _2); }
// CHECK-LABEL: @xvmuh_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmuh.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvmuh_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvmuh_h(_1, _2); }
// CHECK-LABEL: @xvmuh_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmuh.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvmuh_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvmuh_w(_1, _2); }
// CHECK-LABEL: @xvmuh_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmuh.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmuh_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvmuh_d(_1, _2); }
// CHECK-LABEL: @xvmuh_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmuh.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvmuh_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvmuh_bu(_1, _2); }
// CHECK-LABEL: @xvmuh_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmuh.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16u16 xvmuh_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvmuh_hu(_1, _2); }
// CHECK-LABEL: @xvmuh_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmuh.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvmuh_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvmuh_wu(_1, _2); }
// CHECK-LABEL: @xvmuh_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmuh.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvmuh_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvmuh_du(_1, _2); }
2243 // CHECK-LABEL: @xvsllwil_h_b(
2244 // CHECK-NEXT: entry:
2245 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsllwil.h.b(<32 x i8> [[_1:%.*]], i32 1)
2246 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2248 v16i16 xvsllwil_h_b(v32i8 _1) { return __builtin_lasx_xvsllwil_h_b(_1, 1); }
2249 // CHECK-LABEL: @xvsllwil_w_h(
2250 // CHECK-NEXT: entry:
2251 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsllwil.w.h(<16 x i16> [[_1:%.*]], i32 1)
2252 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2254 v8i32 xvsllwil_w_h(v16i16 _1) { return __builtin_lasx_xvsllwil_w_h(_1, 1); }
2255 // CHECK-LABEL: @xvsllwil_d_w(
2256 // CHECK-NEXT: entry:
2257 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsllwil.d.w(<8 x i32> [[_1:%.*]], i32 1)
2258 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2260 v4i64 xvsllwil_d_w(v8i32 _1) { return __builtin_lasx_xvsllwil_d_w(_1, 1); }
2261 // CHECK-LABEL: @xvsllwil_hu_bu(
2262 // CHECK-NEXT: entry:
2263 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsllwil.hu.bu(<32 x i8> [[_1:%.*]], i32 1)
2264 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2266 v16u16 xvsllwil_hu_bu(v32u8 _1) { return __builtin_lasx_xvsllwil_hu_bu(_1, 1); }
2267 // CHECK-LABEL: @xvsllwil_wu_hu(
2268 // CHECK-NEXT: entry:
2269 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsllwil.wu.hu(<16 x i16> [[_1:%.*]], i32 1)
2270 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2272 v8u32 xvsllwil_wu_hu(v16u16 _1) { return __builtin_lasx_xvsllwil_wu_hu(_1, 1); }
2273 // CHECK-LABEL: @xvsllwil_du_wu(
2274 // CHECK-NEXT: entry:
2275 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsllwil.du.wu(<8 x i32> [[_1:%.*]], i32 1)
2276 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2278 v4u64 xvsllwil_du_wu(v8u32 _1) { return __builtin_lasx_xvsllwil_du_wu(_1, 1); }
2279 // CHECK-LABEL: @xvsran_b_h(
2280 // CHECK-NEXT: entry:
2281 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsran.b.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
2282 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2284 v32i8 xvsran_b_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsran_b_h(_1, _2); }
2285 // CHECK-LABEL: @xvsran_h_w(
2286 // CHECK-NEXT: entry:
2287 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsran.h.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
2288 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2290 v16i16 xvsran_h_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsran_h_w(_1, _2); }
2291 // CHECK-LABEL: @xvsran_w_d(
2292 // CHECK-NEXT: entry:
2293 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsran.w.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
2294 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2296 v8i32 xvsran_w_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsran_w_d(_1, _2); }
2297 // CHECK-LABEL: @xvssran_b_h(
2298 // CHECK-NEXT: entry:
2299 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssran.b.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
2300 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2302 v32i8 xvssran_b_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvssran_b_h(_1, _2); }
2303 // CHECK-LABEL: @xvssran_h_w(
2304 // CHECK-NEXT: entry:
2305 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssran.h.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
2306 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2308 v16i16 xvssran_h_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvssran_h_w(_1, _2); }
2309 // CHECK-LABEL: @xvssran_w_d(
2310 // CHECK-NEXT: entry:
2311 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssran.w.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
2312 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2314 v8i32 xvssran_w_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvssran_w_d(_1, _2); }
2315 // CHECK-LABEL: @xvssran_bu_h(
2316 // CHECK-NEXT: entry:
2317 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssran.bu.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
2318 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2320 v32u8 xvssran_bu_h(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvssran_bu_h(_1, _2); }
2321 // CHECK-LABEL: @xvssran_hu_w(
2322 // CHECK-NEXT: entry:
2323 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssran.hu.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
2324 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2326 v16u16 xvssran_hu_w(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvssran_hu_w(_1, _2); }
2327 // CHECK-LABEL: @xvssran_wu_d(
2328 // CHECK-NEXT: entry:
2329 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssran.wu.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
2330 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2332 v8u32 xvssran_wu_d(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvssran_wu_d(_1, _2); }
2333 // CHECK-LABEL: @xvsrarn_b_h(
2334 // CHECK-NEXT: entry:
2335 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsrarn.b.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
2336 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2338 v32i8 xvsrarn_b_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsrarn_b_h(_1, _2); }
2339 // CHECK-LABEL: @xvsrarn_h_w(
2340 // CHECK-NEXT: entry:
2341 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsrarn.h.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
2342 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2344 v16i16 xvsrarn_h_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsrarn_h_w(_1, _2); }
2345 // CHECK-LABEL: @xvsrarn_w_d(
2346 // CHECK-NEXT: entry:
2347 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsrarn.w.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
2348 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2350 v8i32 xvsrarn_w_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsrarn_w_d(_1, _2); }
2351 // CHECK-LABEL: @xvssrarn_b_h(
2352 // CHECK-NEXT: entry:
2353 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssrarn.b.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
2354 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2356 v32i8 xvssrarn_b_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvssrarn_b_h(_1, _2); }
2357 // CHECK-LABEL: @xvssrarn_h_w(
2358 // CHECK-NEXT: entry:
2359 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssrarn.h.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
2360 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2362 v16i16 xvssrarn_h_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvssrarn_h_w(_1, _2); }
2363 // CHECK-LABEL: @xvssrarn_w_d(
2364 // CHECK-NEXT: entry:
2365 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssrarn.w.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
2366 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2368 v8i32 xvssrarn_w_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvssrarn_w_d(_1, _2); }
2369 // CHECK-LABEL: @xvssrarn_bu_h(
2370 // CHECK-NEXT: entry:
2371 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssrarn.bu.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
2372 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2374 v32u8 xvssrarn_bu_h(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvssrarn_bu_h(_1, _2); }
2375 // CHECK-LABEL: @xvssrarn_hu_w(
2376 // CHECK-NEXT: entry:
2377 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssrarn.hu.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
2378 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2380 v16u16 xvssrarn_hu_w(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvssrarn_hu_w(_1, _2); }
2381 // CHECK-LABEL: @xvssrarn_wu_d(
2382 // CHECK-NEXT: entry:
2383 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssrarn.wu.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
2384 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2386 v8u32 xvssrarn_wu_d(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvssrarn_wu_d(_1, _2); }
2387 // CHECK-LABEL: @xvsrln_b_h(
2388 // CHECK-NEXT: entry:
2389 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsrln.b.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
2390 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2392 v32i8 xvsrln_b_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsrln_b_h(_1, _2); }
2393 // CHECK-LABEL: @xvsrln_h_w(
2394 // CHECK-NEXT: entry:
2395 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsrln.h.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
2396 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2398 v16i16 xvsrln_h_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsrln_h_w(_1, _2); }
2399 // CHECK-LABEL: @xvsrln_w_d(
2400 // CHECK-NEXT: entry:
2401 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsrln.w.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
2402 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2404 v8i32 xvsrln_w_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsrln_w_d(_1, _2); }
2405 // CHECK-LABEL: @xvssrln_bu_h(
2406 // CHECK-NEXT: entry:
2407 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssrln.bu.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
2408 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2410 v32u8 xvssrln_bu_h(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvssrln_bu_h(_1, _2); }
2411 // CHECK-LABEL: @xvssrln_hu_w(
2412 // CHECK-NEXT: entry:
2413 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssrln.hu.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
2414 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2416 v16u16 xvssrln_hu_w(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvssrln_hu_w(_1, _2); }
2417 // CHECK-LABEL: @xvssrln_wu_d(
2418 // CHECK-NEXT: entry:
2419 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssrln.wu.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
2420 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2422 v8u32 xvssrln_wu_d(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvssrln_wu_d(_1, _2); }
2423 // CHECK-LABEL: @xvsrlrn_b_h(
2424 // CHECK-NEXT: entry:
2425 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsrlrn.b.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
2426 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2428 v32i8 xvsrlrn_b_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsrlrn_b_h(_1, _2); }
2429 // CHECK-LABEL: @xvsrlrn_h_w(
2430 // CHECK-NEXT: entry:
2431 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsrlrn.h.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
2432 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2434 v16i16 xvsrlrn_h_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsrlrn_h_w(_1, _2); }
2435 // CHECK-LABEL: @xvsrlrn_w_d(
2436 // CHECK-NEXT: entry:
2437 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsrlrn.w.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
2438 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2440 v8i32 xvsrlrn_w_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsrlrn_w_d(_1, _2); }
2441 // CHECK-LABEL: @xvssrlrn_bu_h(
2442 // CHECK-NEXT: entry:
2443 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssrlrn.bu.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
2444 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2446 v32u8 xvssrlrn_bu_h(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvssrlrn_bu_h(_1, _2); }
2447 // CHECK-LABEL: @xvssrlrn_hu_w(
2448 // CHECK-NEXT: entry:
2449 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssrlrn.hu.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
2450 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2452 v16u16 xvssrlrn_hu_w(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvssrlrn_hu_w(_1, _2); }
2453 // CHECK-LABEL: @xvssrlrn_wu_d(
2454 // CHECK-NEXT: entry:
2455 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssrlrn.wu.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
2456 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2458 v8u32 xvssrlrn_wu_d(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvssrlrn_wu_d(_1, _2); }
2459 // CHECK-LABEL: @xvfrstpi_b(
2460 // CHECK-NEXT: entry:
2461 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvfrstpi.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
2462 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2464 v32i8 xvfrstpi_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvfrstpi_b(_1, _2, 1); }
2465 // CHECK-LABEL: @xvfrstpi_h(
2466 // CHECK-NEXT: entry:
2467 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvfrstpi.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], i32 1)
2468 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2470 v16i16 xvfrstpi_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvfrstpi_h(_1, _2, 1); }
2471 // CHECK-LABEL: @xvfrstp_b(
2472 // CHECK-NEXT: entry:
2473 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvfrstp.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], <32 x i8> [[_3:%.*]])
2474 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2476 v32i8 xvfrstp_b(v32i8 _1, v32i8 _2, v32i8 _3) { return __builtin_lasx_xvfrstp_b(_1, _2, _3); }
2477 // CHECK-LABEL: @xvfrstp_h(
2478 // CHECK-NEXT: entry:
2479 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvfrstp.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], <16 x i16> [[_3:%.*]])
2480 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2482 v16i16 xvfrstp_h(v16i16 _1, v16i16 _2, v16i16 _3) { return __builtin_lasx_xvfrstp_h(_1, _2, _3); }
2483 // CHECK-LABEL: @xvshuf4i_d(
2484 // CHECK-NEXT: entry:
2485 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvshuf4i.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], i32 1)
2486 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2488 v4i64 xvshuf4i_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvshuf4i_d(_1, _2, 1); }
2489 // CHECK-LABEL: @xvbsrl_v(
2490 // CHECK-NEXT: entry:
2491 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvbsrl.v(<32 x i8> [[_1:%.*]], i32 1)
2492 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2494 v32i8 xvbsrl_v(v32i8 _1) { return __builtin_lasx_xvbsrl_v(_1, 1); }
2495 // CHECK-LABEL: @xvbsll_v(
2496 // CHECK-NEXT: entry:
2497 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvbsll.v(<32 x i8> [[_1:%.*]], i32 1)
2498 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2500 v32i8 xvbsll_v(v32i8 _1) { return __builtin_lasx_xvbsll_v(_1, 1); }
2501 // CHECK-LABEL: @xvextrins_b(
2502 // CHECK-NEXT: entry:
2503 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvextrins.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
2504 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2506 v32i8 xvextrins_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvextrins_b(_1, _2, 1); }
2507 // CHECK-LABEL: @xvextrins_h(
2508 // CHECK-NEXT: entry:
2509 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvextrins.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], i32 1)
2510 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2512 v16i16 xvextrins_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvextrins_h(_1, _2, 1); }
2513 // CHECK-LABEL: @xvextrins_w(
2514 // CHECK-NEXT: entry:
2515 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvextrins.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], i32 1)
2516 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2518 v8i32 xvextrins_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvextrins_w(_1, _2, 1); }
2519 // CHECK-LABEL: @xvextrins_d(
2520 // CHECK-NEXT: entry:
2521 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvextrins.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], i32 1)
2522 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2524 v4i64 xvextrins_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvextrins_d(_1, _2, 1); }
2525 // CHECK-LABEL: @xvmskltz_b(
2526 // CHECK-NEXT: entry:
2527 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmskltz.b(<32 x i8> [[_1:%.*]])
2528 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2530 v32i8 xvmskltz_b(v32i8 _1) { return __builtin_lasx_xvmskltz_b(_1); }
2531 // CHECK-LABEL: @xvmskltz_h(
2532 // CHECK-NEXT: entry:
2533 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmskltz.h(<16 x i16> [[_1:%.*]])
2534 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2536 v16i16 xvmskltz_h(v16i16 _1) { return __builtin_lasx_xvmskltz_h(_1); }
2537 // CHECK-LABEL: @xvmskltz_w(
2538 // CHECK-NEXT: entry:
2539 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmskltz.w(<8 x i32> [[_1:%.*]])
2540 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2542 v8i32 xvmskltz_w(v8i32 _1) { return __builtin_lasx_xvmskltz_w(_1); }
2543 // CHECK-LABEL: @xvmskltz_d(
2544 // CHECK-NEXT: entry:
2545 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmskltz.d(<4 x i64> [[_1:%.*]])
2546 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2548 v4i64 xvmskltz_d(v4i64 _1) { return __builtin_lasx_xvmskltz_d(_1); }
2549 // CHECK-LABEL: @xvsigncov_b(
2550 // CHECK-NEXT: entry:
2551 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsigncov.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
2552 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2554 v32i8 xvsigncov_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvsigncov_b(_1, _2); }
2555 // CHECK-LABEL: @xvsigncov_h(
2556 // CHECK-NEXT: entry:
2557 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsigncov.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
2558 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2560 v16i16 xvsigncov_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsigncov_h(_1, _2); }
2561 // CHECK-LABEL: @xvsigncov_w(
2562 // CHECK-NEXT: entry:
2563 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsigncov.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
2564 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2566 v8i32 xvsigncov_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsigncov_w(_1, _2); }
2567 // CHECK-LABEL: @xvsigncov_d(
2568 // CHECK-NEXT: entry:
2569 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsigncov.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
2570 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2572 v4i64 xvsigncov_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsigncov_d(_1, _2); }
2573 // CHECK-LABEL: @xvfmadd_s(
2574 // CHECK-NEXT: entry:
2575 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfmadd.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]], <8 x float> [[_3:%.*]])
2576 // CHECK-NEXT: ret <8 x float> [[TMP0]]
2578 v8f32 xvfmadd_s(v8f32 _1, v8f32 _2, v8f32 _3) { return __builtin_lasx_xvfmadd_s(_1, _2, _3); }
2579 // CHECK-LABEL: @xvfmadd_d(
2580 // CHECK-NEXT: entry:
2581 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfmadd.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]], <4 x double> [[_3:%.*]])
2582 // CHECK-NEXT: ret <4 x double> [[TMP0]]
2584 v4f64 xvfmadd_d(v4f64 _1, v4f64 _2, v4f64 _3) { return __builtin_lasx_xvfmadd_d(_1, _2, _3); }
2585 // CHECK-LABEL: @xvfmsub_s(
2586 // CHECK-NEXT: entry:
2587 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfmsub.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]], <8 x float> [[_3:%.*]])
2588 // CHECK-NEXT: ret <8 x float> [[TMP0]]
2590 v8f32 xvfmsub_s(v8f32 _1, v8f32 _2, v8f32 _3) { return __builtin_lasx_xvfmsub_s(_1, _2, _3); }
2591 // CHECK-LABEL: @xvfmsub_d(
2592 // CHECK-NEXT: entry:
2593 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfmsub.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]], <4 x double> [[_3:%.*]])
2594 // CHECK-NEXT: ret <4 x double> [[TMP0]]
2596 v4f64 xvfmsub_d(v4f64 _1, v4f64 _2, v4f64 _3) { return __builtin_lasx_xvfmsub_d(_1, _2, _3); }
2597 // CHECK-LABEL: @xvfnmadd_s(
2598 // CHECK-NEXT: entry:
2599 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfnmadd.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]], <8 x float> [[_3:%.*]])
2600 // CHECK-NEXT: ret <8 x float> [[TMP0]]
2602 v8f32 xvfnmadd_s(v8f32 _1, v8f32 _2, v8f32 _3) { return __builtin_lasx_xvfnmadd_s(_1, _2, _3); }
2603 // CHECK-LABEL: @xvfnmadd_d(
2604 // CHECK-NEXT: entry:
2605 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfnmadd.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]], <4 x double> [[_3:%.*]])
2606 // CHECK-NEXT: ret <4 x double> [[TMP0]]
2608 v4f64 xvfnmadd_d(v4f64 _1, v4f64 _2, v4f64 _3) { return __builtin_lasx_xvfnmadd_d(_1, _2, _3); }
2609 // CHECK-LABEL: @xvfnmsub_s(
2610 // CHECK-NEXT: entry:
2611 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfnmsub.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]], <8 x float> [[_3:%.*]])
2612 // CHECK-NEXT: ret <8 x float> [[TMP0]]
2614 v8f32 xvfnmsub_s(v8f32 _1, v8f32 _2, v8f32 _3) { return __builtin_lasx_xvfnmsub_s(_1, _2, _3); }
2615 // CHECK-LABEL: @xvfnmsub_d(
2616 // CHECK-NEXT: entry:
2617 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfnmsub.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]], <4 x double> [[_3:%.*]])
2618 // CHECK-NEXT: ret <4 x double> [[TMP0]]
2620 v4f64 xvfnmsub_d(v4f64 _1, v4f64 _2, v4f64 _3) { return __builtin_lasx_xvfnmsub_d(_1, _2, _3); }
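// Each fused-multiply form above (fmadd/fmsub/fnmadd/fnmsub) is checked as a
// direct call to its own LASX intrinsic; none is expanded into llvm.fma with
// separate negations at -O2.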
2621 // CHECK-LABEL: @xvftintrne_w_s(
2622 // CHECK-NEXT: entry:
2623 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvftintrne.w.s(<8 x float> [[_1:%.*]])
2624 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2626 v8i32 xvftintrne_w_s(v8f32 _1) { return __builtin_lasx_xvftintrne_w_s(_1); }
2627 // CHECK-LABEL: @xvftintrne_l_d(
2628 // CHECK-NEXT: entry:
2629 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftintrne.l.d(<4 x double> [[_1:%.*]])
2630 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2632 v4i64 xvftintrne_l_d(v4f64 _1) { return __builtin_lasx_xvftintrne_l_d(_1); }
2633 // CHECK-LABEL: @xvftintrp_w_s(
2634 // CHECK-NEXT: entry:
2635 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvftintrp.w.s(<8 x float> [[_1:%.*]])
2636 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2638 v8i32 xvftintrp_w_s(v8f32 _1) { return __builtin_lasx_xvftintrp_w_s(_1); }
2639 // CHECK-LABEL: @xvftintrp_l_d(
2640 // CHECK-NEXT: entry:
2641 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftintrp.l.d(<4 x double> [[_1:%.*]])
2642 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2644 v4i64 xvftintrp_l_d(v4f64 _1) { return __builtin_lasx_xvftintrp_l_d(_1); }
2645 // CHECK-LABEL: @xvftintrm_w_s(
2646 // CHECK-NEXT: entry:
2647 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvftintrm.w.s(<8 x float> [[_1:%.*]])
2648 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2650 v8i32 xvftintrm_w_s(v8f32 _1) { return __builtin_lasx_xvftintrm_w_s(_1); }
2651 // CHECK-LABEL: @xvftintrm_l_d(
2652 // CHECK-NEXT: entry:
2653 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftintrm.l.d(<4 x double> [[_1:%.*]])
2654 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2656 v4i64 xvftintrm_l_d(v4f64 _1) { return __builtin_lasx_xvftintrm_l_d(_1); }
2657 // CHECK-LABEL: @xvftint_w_d(
2658 // CHECK-NEXT: entry:
2659 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvftint.w.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
2660 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2662 v8i32 xvftint_w_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvftint_w_d(_1, _2); }
2663 // CHECK-LABEL: @xvffint_s_l(
2664 // CHECK-NEXT: entry:
2665 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvffint.s.l(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
2666 // CHECK-NEXT: ret <8 x float> [[TMP0]]
2668 v8f32 xvffint_s_l(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvffint_s_l(_1, _2); }
2669 // CHECK-LABEL: @xvftintrz_w_d(
2670 // CHECK-NEXT: entry:
2671 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvftintrz.w.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
2672 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2674 v8i32 xvftintrz_w_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvftintrz_w_d(_1, _2); }
2675 // CHECK-LABEL: @xvftintrp_w_d(
2676 // CHECK-NEXT: entry:
2677 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvftintrp.w.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
2678 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2680 v8i32 xvftintrp_w_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvftintrp_w_d(_1, _2); }
2681 // CHECK-LABEL: @xvftintrm_w_d(
2682 // CHECK-NEXT: entry:
2683 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvftintrm.w.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
2684 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2686 v8i32 xvftintrm_w_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvftintrm_w_d(_1, _2); }
2687 // CHECK-LABEL: @xvftintrne_w_d(
2688 // CHECK-NEXT: entry:
2689 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvftintrne.w.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
2690 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2692 v8i32 xvftintrne_w_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvftintrne_w_d(_1, _2); }
2693 // CHECK-LABEL: @xvftinth_l_s(
2694 // CHECK-NEXT: entry:
2695 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftinth.l.s(<8 x float> [[_1:%.*]])
2696 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2698 v4i64 xvftinth_l_s(v8f32 _1) { return __builtin_lasx_xvftinth_l_s(_1); }
2699 // CHECK-LABEL: @xvftintl_l_s(
2700 // CHECK-NEXT: entry:
2701 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftintl.l.s(<8 x float> [[_1:%.*]])
2702 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2704 v4i64 xvftintl_l_s(v8f32 _1) { return __builtin_lasx_xvftintl_l_s(_1); }
2705 // CHECK-LABEL: @xvffinth_d_w(
2706 // CHECK-NEXT: entry:
2707 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvffinth.d.w(<8 x i32> [[_1:%.*]])
2708 // CHECK-NEXT: ret <4 x double> [[TMP0]]
2710 v4f64 xvffinth_d_w(v8i32 _1) { return __builtin_lasx_xvffinth_d_w(_1); }
2711 // CHECK-LABEL: @xvffintl_d_w(
2712 // CHECK-NEXT: entry:
2713 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvffintl.d.w(<8 x i32> [[_1:%.*]])
2714 // CHECK-NEXT: ret <4 x double> [[TMP0]]
2716 v4f64 xvffintl_d_w(v8i32 _1) { return __builtin_lasx_xvffintl_d_w(_1); }
2717 // CHECK-LABEL: @xvftintrzh_l_s(
2718 // CHECK-NEXT: entry:
2719 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftintrzh.l.s(<8 x float> [[_1:%.*]])
2720 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2722 v4i64 xvftintrzh_l_s(v8f32 _1) { return __builtin_lasx_xvftintrzh_l_s(_1); }
2723 // CHECK-LABEL: @xvftintrzl_l_s(
2724 // CHECK-NEXT: entry:
2725 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftintrzl.l.s(<8 x float> [[_1:%.*]])
2726 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2728 v4i64 xvftintrzl_l_s(v8f32 _1) { return __builtin_lasx_xvftintrzl_l_s(_1); }
2729 // CHECK-LABEL: @xvftintrph_l_s(
2730 // CHECK-NEXT: entry:
2731 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftintrph.l.s(<8 x float> [[_1:%.*]])
2732 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2734 v4i64 xvftintrph_l_s(v8f32 _1) { return __builtin_lasx_xvftintrph_l_s(_1); }
2735 // CHECK-LABEL: @xvftintrpl_l_s(
2736 // CHECK-NEXT: entry:
2737 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftintrpl.l.s(<8 x float> [[_1:%.*]])
2738 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2740 v4i64 xvftintrpl_l_s(v8f32 _1) { return __builtin_lasx_xvftintrpl_l_s(_1); }
2741 // CHECK-LABEL: @xvftintrmh_l_s(
2742 // CHECK-NEXT: entry:
2743 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftintrmh.l.s(<8 x float> [[_1:%.*]])
2744 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2746 v4i64 xvftintrmh_l_s(v8f32 _1) { return __builtin_lasx_xvftintrmh_l_s(_1); }
2747 // CHECK-LABEL: @xvftintrml_l_s(
2748 // CHECK-NEXT: entry:
2749 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftintrml.l.s(<8 x float> [[_1:%.*]])
2750 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2752 v4i64 xvftintrml_l_s(v8f32 _1) { return __builtin_lasx_xvftintrml_l_s(_1); }
2753 // CHECK-LABEL: @xvftintrneh_l_s(
2754 // CHECK-NEXT: entry:
2755 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftintrneh.l.s(<8 x float> [[_1:%.*]])
2756 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2758 v4i64 xvftintrneh_l_s(v8f32 _1) { return __builtin_lasx_xvftintrneh_l_s(_1); }
2759 // CHECK-LABEL: @xvftintrnel_l_s(
2760 // CHECK-NEXT: entry:
2761 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvftintrnel.l.s(<8 x float> [[_1:%.*]])
2762 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2764 v4i64 xvftintrnel_l_s(v8f32 _1) { return __builtin_lasx_xvftintrnel_l_s(_1); }
2765 // CHECK-LABEL: @xvfrintrne_s(
2766 // CHECK-NEXT: entry:
2767 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfrintrne.s(<8 x float> [[_1:%.*]])
2768 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x float> [[TMP0]] to <8 x i32>
2769 // CHECK-NEXT: ret <8 x i32> [[TMP1]]
2771 v8i32 xvfrintrne_s(v8f32 _1) { return __builtin_lasx_xvfrintrne_s(_1); }
2772 // CHECK-LABEL: @xvfrintrne_d(
2773 // CHECK-NEXT: entry:
2774 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfrintrne.d(<4 x double> [[_1:%.*]])
2775 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x double> [[TMP0]] to <4 x i64>
2776 // CHECK-NEXT: ret <4 x i64> [[TMP1]]
2778 v4i64 xvfrintrne_d(v4f64 _1) { return __builtin_lasx_xvfrintrne_d(_1); }
2779 // CHECK-LABEL: @xvfrintrz_s(
2780 // CHECK-NEXT: entry:
2781 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfrintrz.s(<8 x float> [[_1:%.*]])
2782 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x float> [[TMP0]] to <8 x i32>
2783 // CHECK-NEXT: ret <8 x i32> [[TMP1]]
2785 v8i32 xvfrintrz_s(v8f32 _1) { return __builtin_lasx_xvfrintrz_s(_1); }
2786 // CHECK-LABEL: @xvfrintrz_d(
2787 // CHECK-NEXT: entry:
2788 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfrintrz.d(<4 x double> [[_1:%.*]])
2789 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x double> [[TMP0]] to <4 x i64>
2790 // CHECK-NEXT: ret <4 x i64> [[TMP1]]
2792 v4i64 xvfrintrz_d(v4f64 _1) { return __builtin_lasx_xvfrintrz_d(_1); }
2793 // CHECK-LABEL: @xvfrintrp_s(
2794 // CHECK-NEXT: entry:
2795 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfrintrp.s(<8 x float> [[_1:%.*]])
2796 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x float> [[TMP0]] to <8 x i32>
2797 // CHECK-NEXT: ret <8 x i32> [[TMP1]]
2799 v8i32 xvfrintrp_s(v8f32 _1) { return __builtin_lasx_xvfrintrp_s(_1); }
2800 // CHECK-LABEL: @xvfrintrp_d(
2801 // CHECK-NEXT: entry:
2802 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfrintrp.d(<4 x double> [[_1:%.*]])
2803 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x double> [[TMP0]] to <4 x i64>
2804 // CHECK-NEXT: ret <4 x i64> [[TMP1]]
2806 v4i64 xvfrintrp_d(v4f64 _1) { return __builtin_lasx_xvfrintrp_d(_1); }
2807 // CHECK-LABEL: @xvfrintrm_s(
2808 // CHECK-NEXT: entry:
2809 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvfrintrm.s(<8 x float> [[_1:%.*]])
2810 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x float> [[TMP0]] to <8 x i32>
2811 // CHECK-NEXT: ret <8 x i32> [[TMP1]]
2813 v8i32 xvfrintrm_s(v8f32 _1) { return __builtin_lasx_xvfrintrm_s(_1); }
2814 // CHECK-LABEL: @xvfrintrm_d(
2815 // CHECK-NEXT: entry:
2816 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvfrintrm.d(<4 x double> [[_1:%.*]])
2817 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x double> [[TMP0]] to <4 x i64>
2818 // CHECK-NEXT: ret <4 x i64> [[TMP1]]
2820 v4i64 xvfrintrm_d(v4f64 _1) { return __builtin_lasx_xvfrintrm_d(_1); }
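// The xvfrint* builtins round in floating point but are declared in C to
// return integer vectors, so the checks above match a bitcast of the
// <8 x float>/<4 x double> intrinsic result instead of a conversion.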
2821 // CHECK-LABEL: @xvld(
2822 // CHECK-NEXT: entry:
2823 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvld(ptr [[_1:%.*]], i32 1)
2824 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2826 v32i8 xvld(void *_1) { return __builtin_lasx_xvld(_1, 1); }
2827 // CHECK-LABEL: @xvst(
2828 // CHECK-NEXT: entry:
2829 // CHECK-NEXT: tail call void @llvm.loongarch.lasx.xvst(<32 x i8> [[_1:%.*]], ptr [[_2:%.*]], i32 1)
2830 // CHECK-NEXT: ret void
2832 void xvst(v32i8 _1, void *_2) { __builtin_lasx_xvst(_1, _2, 1); }
2833 // CHECK-LABEL: @xvstelm_b(
2834 // CHECK-NEXT: entry:
2835 // CHECK-NEXT: tail call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> [[_1:%.*]], ptr [[_2:%.*]], i32 1, i32 1)
2836 // CHECK-NEXT: ret void
2838 void xvstelm_b(v32i8 _1, void *_2) { __builtin_lasx_xvstelm_b(_1, _2, 1, 1); }
2839 // CHECK-LABEL: @xvstelm_h(
2840 // CHECK-NEXT: entry:
2841 // CHECK-NEXT: tail call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> [[_1:%.*]], ptr [[_2:%.*]], i32 2, i32 1)
2842 // CHECK-NEXT: ret void
2844 void xvstelm_h(v16i16 _1, void *_2) { __builtin_lasx_xvstelm_h(_1, _2, 2, 1); }
2845 // CHECK-LABEL: @xvstelm_w(
2846 // CHECK-NEXT: entry:
2847 // CHECK-NEXT: tail call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> [[_1:%.*]], ptr [[_2:%.*]], i32 4, i32 1)
2848 // CHECK-NEXT: ret void
2850 void xvstelm_w(v8i32 _1, void *_2) { __builtin_lasx_xvstelm_w(_1, _2, 4, 1); }
2851 // CHECK-LABEL: @xvstelm_d(
2852 // CHECK-NEXT: entry:
2853 // CHECK-NEXT: tail call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> [[_1:%.*]], ptr [[_2:%.*]], i32 8, i32 1)
2854 // CHECK-NEXT: ret void
2856 void xvstelm_d(v4i64 _1, void *_2) { __builtin_lasx_xvstelm_d(_1, _2, 8, 1); }
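// The xvstelm_* stores take a byte offset and an element index; the offsets
// used above (1, 2, 4, 8) match the element width of each variant.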
2857 // CHECK-LABEL: @xvinsve0_w(
2858 // CHECK-NEXT: entry:
2859 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvinsve0.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], i32 1)
2860 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2862 v8i32 xvinsve0_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvinsve0_w(_1, _2, 1); }
2863 // CHECK-LABEL: @xvinsve0_d(
2864 // CHECK-NEXT: entry:
2865 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvinsve0.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], i32 1)
2866 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2868 v4i64 xvinsve0_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvinsve0_d(_1, _2, 1); }
2869 // CHECK-LABEL: @xvpickve_w(
2870 // CHECK-NEXT: entry:
2871 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvpickve.w(<8 x i32> [[_1:%.*]], i32 1)
2872 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2874 v8i32 xvpickve_w(v8i32 _1) { return __builtin_lasx_xvpickve_w(_1, 1); }
2875 // CHECK-LABEL: @xvpickve_d(
2876 // CHECK-NEXT: entry:
2877 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvpickve.d(<4 x i64> [[_1:%.*]], i32 1)
2878 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2880 v4i64 xvpickve_d(v4i64 _1) { return __builtin_lasx_xvpickve_d(_1, 1); }
2881 // CHECK-LABEL: @xvssrlrn_b_h(
2882 // CHECK-NEXT: entry:
2883 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssrlrn.b.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
2884 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2886 v32i8 xvssrlrn_b_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvssrlrn_b_h(_1, _2); }
2887 // CHECK-LABEL: @xvssrlrn_h_w(
2888 // CHECK-NEXT: entry:
2889 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssrlrn.h.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
2890 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2892 v16i16 xvssrlrn_h_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvssrlrn_h_w(_1, _2); }
2893 // CHECK-LABEL: @xvssrlrn_w_d(
2894 // CHECK-NEXT: entry:
2895 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssrlrn.w.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
2896 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2898 v8i32 xvssrlrn_w_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvssrlrn_w_d(_1, _2); }
2899 // CHECK-LABEL: @xvssrln_b_h(
2900 // CHECK-NEXT: entry:
2901 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssrln.b.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
2902 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2904 v32i8 xvssrln_b_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvssrln_b_h(_1, _2); }
2905 // CHECK-LABEL: @xvssrln_h_w(
2906 // CHECK-NEXT: entry:
2907 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssrln.h.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
2908 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2910 v16i16 xvssrln_h_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvssrln_h_w(_1, _2); }
2911 // CHECK-LABEL: @xvssrln_w_d(
2912 // CHECK-NEXT: entry:
2913 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssrln.w.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
2914 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2916 v8i32 xvssrln_w_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvssrln_w_d(_1, _2); }
2917 // CHECK-LABEL: @xvorn_v(
2918 // CHECK-NEXT: entry:
2919 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvorn.v(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
2920 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2922 v32i8 xvorn_v(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvorn_v(_1, _2); }
2923 // CHECK-LABEL: @xvldi(
2924 // CHECK-NEXT: entry:
2925 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvldi(i32 1)
2926 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2928 v4i64 xvldi(void) { return __builtin_lasx_xvldi(1); }
2929 // CHECK-LABEL: @xvldx(
2930 // CHECK-NEXT: entry:
2931 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvldx(ptr [[_1:%.*]], i64 1)
2932 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2934 v32i8 xvldx(void *_1) { return __builtin_lasx_xvldx(_1, 1); }
2935 // CHECK-LABEL: @xvstx(
2936 // CHECK-NEXT: entry:
2937 // CHECK-NEXT: tail call void @llvm.loongarch.lasx.xvstx(<32 x i8> [[_1:%.*]], ptr [[_2:%.*]], i64 1)
2938 // CHECK-NEXT: ret void
2940 void xvstx(v32i8 _1, void *_2) { __builtin_lasx_xvstx(_1, _2, 1); }
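// xvldx/xvstx address memory with a register offset, so the second operand is
// lowered as i64, unlike the i32 immediate used by xvld/xvst above.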
2941 // CHECK-LABEL: @xvextl_qu_du(
2942 // CHECK-NEXT: entry:
2943 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvextl.qu.du(<4 x i64> [[_1:%.*]])
2944 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2946 v4u64 xvextl_qu_du(v4u64 _1) { return __builtin_lasx_xvextl_qu_du(_1); }
2947 // CHECK-LABEL: @xvinsgr2vr_w(
2948 // CHECK-NEXT: entry:
2949 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvinsgr2vr.w(<8 x i32> [[_1:%.*]], i32 1, i32 1)
2950 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2952 v8i32 xvinsgr2vr_w(v8i32 _1) { return __builtin_lasx_xvinsgr2vr_w(_1, 1, 1); }
2953 // CHECK-LABEL: @xvinsgr2vr_d(
2954 // CHECK-NEXT: entry:
2955 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvinsgr2vr.d(<4 x i64> [[_1:%.*]], i64 1, i32 1)
2956 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2958 v4i64 xvinsgr2vr_d(v4i64 _1) { return __builtin_lasx_xvinsgr2vr_d(_1, 1, 1); }
2959 // CHECK-LABEL: @xvreplve0_b(
2960 // CHECK-NEXT: entry:
2961 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvreplve0.b(<32 x i8> [[_1:%.*]])
2962 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2964 v32i8 xvreplve0_b(v32i8 _1) { return __builtin_lasx_xvreplve0_b(_1); }
2965 // CHECK-LABEL: @xvreplve0_h(
2966 // CHECK-NEXT: entry:
2967 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvreplve0.h(<16 x i16> [[_1:%.*]])
2968 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2970 v16i16 xvreplve0_h(v16i16 _1) { return __builtin_lasx_xvreplve0_h(_1); }
2971 // CHECK-LABEL: @xvreplve0_w(
2972 // CHECK-NEXT: entry:
2973 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvreplve0.w(<8 x i32> [[_1:%.*]])
2974 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
2976 v8i32 xvreplve0_w(v8i32 _1) { return __builtin_lasx_xvreplve0_w(_1); }
2977 // CHECK-LABEL: @xvreplve0_d(
2978 // CHECK-NEXT: entry:
2979 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvreplve0.d(<4 x i64> [[_1:%.*]])
2980 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
2982 v4i64 xvreplve0_d(v4i64 _1) { return __builtin_lasx_xvreplve0_d(_1); }
2983 // CHECK-LABEL: @xvreplve0_q(
2984 // CHECK-NEXT: entry:
2985 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvreplve0.q(<32 x i8> [[_1:%.*]])
2986 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
2988 v32i8 xvreplve0_q(v32i8 _1) { return __builtin_lasx_xvreplve0_q(_1); }
2989 // CHECK-LABEL: @vext2xv_h_b(
2990 // CHECK-NEXT: entry:
2991 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.vext2xv.h.b(<32 x i8> [[_1:%.*]])
2992 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
2994 v16i16 vext2xv_h_b(v32i8 _1) { return __builtin_lasx_vext2xv_h_b(_1); }
2995 // CHECK-LABEL: @vext2xv_w_h(
2996 // CHECK-NEXT: entry:
2997 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.vext2xv.w.h(<16 x i16> [[_1:%.*]])
2998 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
3000 v8i32 vext2xv_w_h(v16i16 _1) { return __builtin_lasx_vext2xv_w_h(_1); }
3001 // CHECK-LABEL: @vext2xv_d_w(
3002 // CHECK-NEXT: entry:
3003 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.vext2xv.d.w(<8 x i32> [[_1:%.*]])
3004 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3006 v4i64 vext2xv_d_w(v8i32 _1) { return __builtin_lasx_vext2xv_d_w(_1); }
3007 // CHECK-LABEL: @vext2xv_w_b(
3008 // CHECK-NEXT: entry:
3009 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.vext2xv.w.b(<32 x i8> [[_1:%.*]])
3010 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
3012 v8i32 vext2xv_w_b(v32i8 _1) { return __builtin_lasx_vext2xv_w_b(_1); }
3013 // CHECK-LABEL: @vext2xv_d_h(
3014 // CHECK-NEXT: entry:
3015 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.vext2xv.d.h(<16 x i16> [[_1:%.*]])
3016 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3018 v4i64 vext2xv_d_h(v16i16 _1) { return __builtin_lasx_vext2xv_d_h(_1); }
3019 // CHECK-LABEL: @vext2xv_d_b(
3020 // CHECK-NEXT: entry:
3021 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.vext2xv.d.b(<32 x i8> [[_1:%.*]])
3022 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3024 v4i64 vext2xv_d_b(v32i8 _1) { return __builtin_lasx_vext2xv_d_b(_1); }
3025 // CHECK-LABEL: @vext2xv_hu_bu(
3026 // CHECK-NEXT: entry:
3027 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.vext2xv.hu.bu(<32 x i8> [[_1:%.*]])
3028 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
3030 v16i16 vext2xv_hu_bu(v32i8 _1) { return __builtin_lasx_vext2xv_hu_bu(_1); }
3031 // CHECK-LABEL: @vext2xv_wu_hu(
3032 // CHECK-NEXT: entry:
3033 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.vext2xv.wu.hu(<16 x i16> [[_1:%.*]])
3034 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
3036 v8i32 vext2xv_wu_hu(v16i16 _1) { return __builtin_lasx_vext2xv_wu_hu(_1); }
3037 // CHECK-LABEL: @vext2xv_du_wu(
3038 // CHECK-NEXT: entry:
3039 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.vext2xv.du.wu(<8 x i32> [[_1:%.*]])
3040 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3042 v4i64 vext2xv_du_wu(v8i32 _1) { return __builtin_lasx_vext2xv_du_wu(_1); }
3043 // CHECK-LABEL: @vext2xv_wu_bu(
3044 // CHECK-NEXT: entry:
3045 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.vext2xv.wu.bu(<32 x i8> [[_1:%.*]])
3046 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
3048 v8i32 vext2xv_wu_bu(v32i8 _1) { return __builtin_lasx_vext2xv_wu_bu(_1); }
3049 // CHECK-LABEL: @vext2xv_du_hu(
3050 // CHECK-NEXT: entry:
3051 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.vext2xv.du.hu(<16 x i16> [[_1:%.*]])
3052 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3054 v4i64 vext2xv_du_hu(v16i16 _1) { return __builtin_lasx_vext2xv_du_hu(_1); }
3055 // CHECK-LABEL: @vext2xv_du_bu(
3056 // CHECK-NEXT: entry:
3057 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.vext2xv.du.bu(<32 x i8> [[_1:%.*]])
3058 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3060 v4i64 vext2xv_du_bu(v32i8 _1) { return __builtin_lasx_vext2xv_du_bu(_1); }
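// The zero-extending vext2xv_*u_*u forms are exercised with signed C vector
// types here; only the intrinsic name distinguishes them from the
// sign-extending forms above.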
3061 // CHECK-LABEL: @xvpermi_q(
3062 // CHECK-NEXT: entry:
3063 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvpermi.q(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
3064 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
3066 v32i8 xvpermi_q(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvpermi_q(_1, _2, 1); }
3067 // CHECK-LABEL: @xvpermi_d(
3068 // CHECK-NEXT: entry:
3069 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvpermi.d(<4 x i64> [[_1:%.*]], i32 1)
3070 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3072 v4i64 xvpermi_d(v4i64 _1) { return __builtin_lasx_xvpermi_d(_1, 1); }
3073 // CHECK-LABEL: @xvperm_w(
3074 // CHECK-NEXT: entry:
3075 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvperm.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
3076 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
3078 v8i32 xvperm_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvperm_w(_1, _2); }
3079 // CHECK-LABEL: @xvldrepl_b(
3080 // CHECK-NEXT: entry:
3081 // CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(ptr [[_1:%.*]], i32 1)
3082 // CHECK-NEXT: ret <32 x i8> [[TMP0]]
3084 v32i8 xvldrepl_b(void *_1) { return __builtin_lasx_xvldrepl_b(_1, 1); }
3085 // CHECK-LABEL: @xvldrepl_h(
3086 // CHECK-NEXT: entry:
3087 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(ptr [[_1:%.*]], i32 2)
3088 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
3090 v16i16 xvldrepl_h(void *_1) { return __builtin_lasx_xvldrepl_h(_1, 2); }
3091 // CHECK-LABEL: @xvldrepl_w(
3092 // CHECK-NEXT: entry:
3093 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(ptr [[_1:%.*]], i32 4)
3094 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
3096 v8i32 xvldrepl_w(void *_1) { return __builtin_lasx_xvldrepl_w(_1, 4); }
3097 // CHECK-LABEL: @xvldrepl_d(
3098 // CHECK-NEXT: entry:
3099 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(ptr [[_1:%.*]], i32 8)
3100 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3102 v4i64 xvldrepl_d(void *_1) { return __builtin_lasx_xvldrepl_d(_1, 8); }
3103 // CHECK-LABEL: @xvpickve2gr_w(
3104 // CHECK-NEXT: entry:
3105 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lasx.xvpickve2gr.w(<8 x i32> [[_1:%.*]], i32 1)
3106 // CHECK-NEXT: ret i32 [[TMP0]]
3108 int xvpickve2gr_w(v8i32 _1) { return __builtin_lasx_xvpickve2gr_w(_1, 1); }
3109 // CHECK-LABEL: @xvpickve2gr_wu(
3110 // CHECK-NEXT: entry:
3111 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lasx.xvpickve2gr.wu(<8 x i32> [[_1:%.*]], i32 1)
3112 // CHECK-NEXT: ret i32 [[TMP0]]
3114 unsigned int xvpickve2gr_wu(v8i32 _1) { return __builtin_lasx_xvpickve2gr_wu(_1, 1); }
3115 // CHECK-LABEL: @xvpickve2gr_d(
3116 // CHECK-NEXT: entry:
3117 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.loongarch.lasx.xvpickve2gr.d(<4 x i64> [[_1:%.*]], i32 1)
3118 // CHECK-NEXT: ret i64 [[TMP0]]
3120 long xvpickve2gr_d(v4i64 _1) { return __builtin_lasx_xvpickve2gr_d(_1, 1); }
3121 // CHECK-LABEL: @xvpickve2gr_du(
3122 // CHECK-NEXT: entry:
3123 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.loongarch.lasx.xvpickve2gr.du(<4 x i64> [[_1:%.*]], i32 1)
3124 // CHECK-NEXT: ret i64 [[TMP0]]
3126 unsigned long int xvpickve2gr_du(v4i64 _1) { return __builtin_lasx_xvpickve2gr_du(_1, 1); }
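// xvpickve2gr_* extracts one element into a general-purpose register: the
// w/wu variants return i32 and the d/du variants return i64.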
3127 // CHECK-LABEL: @xvaddwev_q_d(
3128 // CHECK-NEXT: entry:
3129 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvaddwev.q.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
3130 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3132 v4i64 xvaddwev_q_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvaddwev_q_d(_1, _2); }
3133 // CHECK-LABEL: @xvaddwev_d_w(
3134 // CHECK-NEXT: entry:
3135 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvaddwev.d.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
3136 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3138 v4i64 xvaddwev_d_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvaddwev_d_w(_1, _2); }
3139 // CHECK-LABEL: @xvaddwev_w_h(
3140 // CHECK-NEXT: entry:
3141 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvaddwev.w.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
3142 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
3144 v8i32 xvaddwev_w_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvaddwev_w_h(_1, _2); }
3145 // CHECK-LABEL: @xvaddwev_h_b(
3146 // CHECK-NEXT: entry:
3147 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvaddwev.h.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
3148 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
3150 v16i16 xvaddwev_h_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvaddwev_h_b(_1, _2); }
3151 // CHECK-LABEL: @xvaddwev_q_du(
3152 // CHECK-NEXT: entry:
3153 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvaddwev.q.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
3154 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3156 v4i64 xvaddwev_q_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvaddwev_q_du(_1, _2); }
3157 // CHECK-LABEL: @xvaddwev_d_wu(
3158 // CHECK-NEXT: entry:
3159 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvaddwev.d.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
3160 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3162 v4i64 xvaddwev_d_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvaddwev_d_wu(_1, _2); }
3163 // CHECK-LABEL: @xvaddwev_w_hu(
3164 // CHECK-NEXT: entry:
3165 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvaddwev.w.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
3166 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
3168 v8i32 xvaddwev_w_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvaddwev_w_hu(_1, _2); }
3169 // CHECK-LABEL: @xvaddwev_h_bu(
3170 // CHECK-NEXT: entry:
3171 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvaddwev.h.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
3172 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
3174 v16i16 xvaddwev_h_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvaddwev_h_bu(_1, _2); }
3175 // CHECK-LABEL: @xvsubwev_q_d(
3176 // CHECK-NEXT: entry:
3177 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsubwev.q.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
3178 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3180 v4i64 xvsubwev_q_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsubwev_q_d(_1, _2); }
3181 // CHECK-LABEL: @xvsubwev_d_w(
3182 // CHECK-NEXT: entry:
3183 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsubwev.d.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
3184 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3186 v4i64 xvsubwev_d_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsubwev_d_w(_1, _2); }
3187 // CHECK-LABEL: @xvsubwev_w_h(
3188 // CHECK-NEXT: entry:
3189 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsubwev.w.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
3190 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
3192 v8i32 xvsubwev_w_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsubwev_w_h(_1, _2); }
3193 // CHECK-LABEL: @xvsubwev_h_b(
3194 // CHECK-NEXT: entry:
3195 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsubwev.h.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
3196 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
3198 v16i16 xvsubwev_h_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvsubwev_h_b(_1, _2); }
3199 // CHECK-LABEL: @xvsubwev_q_du(
3200 // CHECK-NEXT: entry:
3201 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsubwev.q.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
3202 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3204 v4i64 xvsubwev_q_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvsubwev_q_du(_1, _2); }
3205 // CHECK-LABEL: @xvsubwev_d_wu(
3206 // CHECK-NEXT: entry:
3207 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsubwev.d.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
3208 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3210 v4i64 xvsubwev_d_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvsubwev_d_wu(_1, _2); }
3211 // CHECK-LABEL: @xvsubwev_w_hu(
3212 // CHECK-NEXT: entry:
3213 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsubwev.w.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
3214 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
3216 v8i32 xvsubwev_w_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvsubwev_w_hu(_1, _2); }
3217 // CHECK-LABEL: @xvsubwev_h_bu(
3218 // CHECK-NEXT: entry:
3219 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsubwev.h.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
3220 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
3222 v16i16 xvsubwev_h_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvsubwev_h_bu(_1, _2); }
3223 // CHECK-LABEL: @xvmulwev_q_d(
3224 // CHECK-NEXT: entry:
3225 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmulwev.q.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
3226 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3228 v4i64 xvmulwev_q_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvmulwev_q_d(_1, _2); }
3229 // CHECK-LABEL: @xvmulwev_d_w(
3230 // CHECK-NEXT: entry:
3231 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmulwev.d.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
3232 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3234 v4i64 xvmulwev_d_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvmulwev_d_w(_1, _2); }
3235 // CHECK-LABEL: @xvmulwev_w_h(
3236 // CHECK-NEXT: entry:
3237 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmulwev.w.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
3238 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
3240 v8i32 xvmulwev_w_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvmulwev_w_h(_1, _2); }
3241 // CHECK-LABEL: @xvmulwev_h_b(
3242 // CHECK-NEXT: entry:
3243 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmulwev.h.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
3244 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
3246 v16i16 xvmulwev_h_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvmulwev_h_b(_1, _2); }
3247 // CHECK-LABEL: @xvmulwev_q_du(
3248 // CHECK-NEXT: entry:
3249 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmulwev.q.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
3250 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3252 v4i64 xvmulwev_q_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvmulwev_q_du(_1, _2); }
3253 // CHECK-LABEL: @xvmulwev_d_wu(
3254 // CHECK-NEXT: entry:
3255 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmulwev.d.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
3256 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
3258 v4i64 xvmulwev_d_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvmulwev_d_wu(_1, _2); }
3259 // CHECK-LABEL: @xvmulwev_w_hu(
3260 // CHECK-NEXT: entry:
3261 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmulwev.w.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
3262 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
3264 v8i32 xvmulwev_w_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvmulwev_w_hu(_1, _2); }
3265 // CHECK-LABEL: @xvmulwev_h_bu(
3266 // CHECK-NEXT: entry:
3267 // CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmulwev.h.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
3268 // CHECK-NEXT: ret <16 x i16> [[TMP0]]
3270 v16i16 xvmulwev_h_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvmulwev_h_bu(_1, _2); }
// CHECK-LABEL: @xvaddwod_q_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvaddwod.q.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvaddwod_q_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvaddwod_q_d(_1, _2); }
// CHECK-LABEL: @xvaddwod_d_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvaddwod.d.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvaddwod_d_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvaddwod_d_w(_1, _2); }
// CHECK-LABEL: @xvaddwod_w_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvaddwod.w.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvaddwod_w_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvaddwod_w_h(_1, _2); }
// CHECK-LABEL: @xvaddwod_h_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvaddwod.h.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvaddwod_h_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvaddwod_h_b(_1, _2); }
// CHECK-LABEL: @xvaddwod_q_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvaddwod.q.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvaddwod_q_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvaddwod_q_du(_1, _2); }
// CHECK-LABEL: @xvaddwod_d_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvaddwod.d.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvaddwod_d_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvaddwod_d_wu(_1, _2); }
// CHECK-LABEL: @xvaddwod_w_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvaddwod.w.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvaddwod_w_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvaddwod_w_hu(_1, _2); }
// CHECK-LABEL: @xvaddwod_h_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvaddwod.h.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvaddwod_h_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvaddwod_h_bu(_1, _2); }
// CHECK-LABEL: @xvsubwod_q_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsubwod.q.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvsubwod_q_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsubwod_q_d(_1, _2); }
// CHECK-LABEL: @xvsubwod_d_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsubwod.d.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvsubwod_d_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsubwod_d_w(_1, _2); }
// CHECK-LABEL: @xvsubwod_w_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsubwod.w.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvsubwod_w_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsubwod_w_h(_1, _2); }
// CHECK-LABEL: @xvsubwod_h_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsubwod.h.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvsubwod_h_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvsubwod_h_b(_1, _2); }
// CHECK-LABEL: @xvsubwod_q_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsubwod.q.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvsubwod_q_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvsubwod_q_du(_1, _2); }
// CHECK-LABEL: @xvsubwod_d_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsubwod.d.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvsubwod_d_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvsubwod_d_wu(_1, _2); }
// CHECK-LABEL: @xvsubwod_w_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsubwod.w.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvsubwod_w_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvsubwod_w_hu(_1, _2); }
// CHECK-LABEL: @xvsubwod_h_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsubwod.h.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvsubwod_h_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvsubwod_h_bu(_1, _2); }
// CHECK-LABEL: @xvmulwod_q_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmulwod.q.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmulwod_q_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvmulwod_q_d(_1, _2); }
// CHECK-LABEL: @xvmulwod_d_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmulwod.d.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmulwod_d_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvmulwod_d_w(_1, _2); }
// CHECK-LABEL: @xvmulwod_w_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmulwod.w.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvmulwod_w_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvmulwod_w_h(_1, _2); }
// CHECK-LABEL: @xvmulwod_h_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmulwod.h.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvmulwod_h_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvmulwod_h_b(_1, _2); }
// CHECK-LABEL: @xvmulwod_q_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmulwod.q.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmulwod_q_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvmulwod_q_du(_1, _2); }
// CHECK-LABEL: @xvmulwod_d_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmulwod.d.wu(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmulwod_d_wu(v8u32 _1, v8u32 _2) { return __builtin_lasx_xvmulwod_d_wu(_1, _2); }
// CHECK-LABEL: @xvmulwod_w_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmulwod.w.hu(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvmulwod_w_hu(v16u16 _1, v16u16 _2) { return __builtin_lasx_xvmulwod_w_hu(_1, _2); }
// CHECK-LABEL: @xvmulwod_h_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmulwod.h.bu(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvmulwod_h_bu(v32u8 _1, v32u8 _2) { return __builtin_lasx_xvmulwod_h_bu(_1, _2); }
// CHECK-LABEL: @xvaddwev_d_wu_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvaddwev.d.wu.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvaddwev_d_wu_w(v8u32 _1, v8i32 _2) { return __builtin_lasx_xvaddwev_d_wu_w(_1, _2); }
// CHECK-LABEL: @xvaddwev_w_hu_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvaddwev.w.hu.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvaddwev_w_hu_h(v16u16 _1, v16i16 _2) { return __builtin_lasx_xvaddwev_w_hu_h(_1, _2); }
// CHECK-LABEL: @xvaddwev_h_bu_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvaddwev.h.bu.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvaddwev_h_bu_b(v32u8 _1, v32i8 _2) { return __builtin_lasx_xvaddwev_h_bu_b(_1, _2); }
// CHECK-LABEL: @xvmulwev_d_wu_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmulwev.d.wu.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmulwev_d_wu_w(v8u32 _1, v8i32 _2) { return __builtin_lasx_xvmulwev_d_wu_w(_1, _2); }
// CHECK-LABEL: @xvmulwev_w_hu_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmulwev.w.hu.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvmulwev_w_hu_h(v16u16 _1, v16i16 _2) { return __builtin_lasx_xvmulwev_w_hu_h(_1, _2); }
// CHECK-LABEL: @xvmulwev_h_bu_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmulwev.h.bu.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvmulwev_h_bu_b(v32u8 _1, v32i8 _2) { return __builtin_lasx_xvmulwev_h_bu_b(_1, _2); }
// CHECK-LABEL: @xvaddwod_d_wu_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvaddwod.d.wu.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvaddwod_d_wu_w(v8u32 _1, v8i32 _2) { return __builtin_lasx_xvaddwod_d_wu_w(_1, _2); }
// CHECK-LABEL: @xvaddwod_w_hu_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvaddwod.w.hu.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvaddwod_w_hu_h(v16u16 _1, v16i16 _2) { return __builtin_lasx_xvaddwod_w_hu_h(_1, _2); }
// CHECK-LABEL: @xvaddwod_h_bu_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvaddwod.h.bu.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvaddwod_h_bu_b(v32u8 _1, v32i8 _2) { return __builtin_lasx_xvaddwod_h_bu_b(_1, _2); }
// CHECK-LABEL: @xvmulwod_d_wu_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmulwod.d.wu.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmulwod_d_wu_w(v8u32 _1, v8i32 _2) { return __builtin_lasx_xvmulwod_d_wu_w(_1, _2); }
// CHECK-LABEL: @xvmulwod_w_hu_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmulwod.w.hu.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvmulwod_w_hu_h(v16u16 _1, v16i16 _2) { return __builtin_lasx_xvmulwod_w_hu_h(_1, _2); }
// CHECK-LABEL: @xvmulwod_h_bu_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmulwod.h.bu.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvmulwod_h_bu_b(v32u8 _1, v32i8 _2) { return __builtin_lasx_xvmulwod_h_bu_b(_1, _2); }
// CHECK-LABEL: @xvhaddw_q_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvhaddw.q.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvhaddw_q_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvhaddw_q_d(_1, _2); }
// CHECK-LABEL: @xvhaddw_qu_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvhaddw.qu.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvhaddw_qu_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvhaddw_qu_du(_1, _2); }
// CHECK-LABEL: @xvhsubw_q_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvhsubw.q.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvhsubw_q_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvhsubw_q_d(_1, _2); }
// CHECK-LABEL: @xvhsubw_qu_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvhsubw.qu.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvhsubw_qu_du(v4u64 _1, v4u64 _2) { return __builtin_lasx_xvhsubw_qu_du(_1, _2); }
// CHECK-LABEL: @xvmaddwev_q_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmaddwev.q.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], <4 x i64> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmaddwev_q_d(v4i64 _1, v4i64 _2, v4i64 _3) { return __builtin_lasx_xvmaddwev_q_d(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwev_d_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmaddwev.d.w(<4 x i64> [[_1:%.*]], <8 x i32> [[_2:%.*]], <8 x i32> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmaddwev_d_w(v4i64 _1, v8i32 _2, v8i32 _3) { return __builtin_lasx_xvmaddwev_d_w(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwev_w_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmaddwev.w.h(<8 x i32> [[_1:%.*]], <16 x i16> [[_2:%.*]], <16 x i16> [[_3:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvmaddwev_w_h(v8i32 _1, v16i16 _2, v16i16 _3) { return __builtin_lasx_xvmaddwev_w_h(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwev_h_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmaddwev.h.b(<16 x i16> [[_1:%.*]], <32 x i8> [[_2:%.*]], <32 x i8> [[_3:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvmaddwev_h_b(v16i16 _1, v32i8 _2, v32i8 _3) { return __builtin_lasx_xvmaddwev_h_b(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwev_q_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmaddwev.q.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], <4 x i64> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvmaddwev_q_du(v4u64 _1, v4u64 _2, v4u64 _3) { return __builtin_lasx_xvmaddwev_q_du(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwev_d_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmaddwev.d.wu(<4 x i64> [[_1:%.*]], <8 x i32> [[_2:%.*]], <8 x i32> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvmaddwev_d_wu(v4u64 _1, v8u32 _2, v8u32 _3) { return __builtin_lasx_xvmaddwev_d_wu(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwev_w_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmaddwev.w.hu(<8 x i32> [[_1:%.*]], <16 x i16> [[_2:%.*]], <16 x i16> [[_3:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvmaddwev_w_hu(v8u32 _1, v16u16 _2, v16u16 _3) { return __builtin_lasx_xvmaddwev_w_hu(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwev_h_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmaddwev.h.bu(<16 x i16> [[_1:%.*]], <32 x i8> [[_2:%.*]], <32 x i8> [[_3:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16u16 xvmaddwev_h_bu(v16u16 _1, v32u8 _2, v32u8 _3) { return __builtin_lasx_xvmaddwev_h_bu(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwod_q_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmaddwod.q.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], <4 x i64> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmaddwod_q_d(v4i64 _1, v4i64 _2, v4i64 _3) { return __builtin_lasx_xvmaddwod_q_d(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwod_d_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmaddwod.d.w(<4 x i64> [[_1:%.*]], <8 x i32> [[_2:%.*]], <8 x i32> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmaddwod_d_w(v4i64 _1, v8i32 _2, v8i32 _3) { return __builtin_lasx_xvmaddwod_d_w(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwod_w_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmaddwod.w.h(<8 x i32> [[_1:%.*]], <16 x i16> [[_2:%.*]], <16 x i16> [[_3:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvmaddwod_w_h(v8i32 _1, v16i16 _2, v16i16 _3) { return __builtin_lasx_xvmaddwod_w_h(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwod_h_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmaddwod.h.b(<16 x i16> [[_1:%.*]], <32 x i8> [[_2:%.*]], <32 x i8> [[_3:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvmaddwod_h_b(v16i16 _1, v32i8 _2, v32i8 _3) { return __builtin_lasx_xvmaddwod_h_b(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwod_q_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmaddwod.q.du(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], <4 x i64> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvmaddwod_q_du(v4u64 _1, v4u64 _2, v4u64 _3) { return __builtin_lasx_xvmaddwod_q_du(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwod_d_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmaddwod.d.wu(<4 x i64> [[_1:%.*]], <8 x i32> [[_2:%.*]], <8 x i32> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvmaddwod_d_wu(v4u64 _1, v8u32 _2, v8u32 _3) { return __builtin_lasx_xvmaddwod_d_wu(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwod_w_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmaddwod.w.hu(<8 x i32> [[_1:%.*]], <16 x i16> [[_2:%.*]], <16 x i16> [[_3:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvmaddwod_w_hu(v8u32 _1, v16u16 _2, v16u16 _3) { return __builtin_lasx_xvmaddwod_w_hu(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwod_h_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmaddwod.h.bu(<16 x i16> [[_1:%.*]], <32 x i8> [[_2:%.*]], <32 x i8> [[_3:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16u16 xvmaddwod_h_bu(v16u16 _1, v32u8 _2, v32u8 _3) { return __builtin_lasx_xvmaddwod_h_bu(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwev_q_du_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmaddwev.q.du.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], <4 x i64> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmaddwev_q_du_d(v4i64 _1, v4u64 _2, v4i64 _3) { return __builtin_lasx_xvmaddwev_q_du_d(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwev_d_wu_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmaddwev.d.wu.w(<4 x i64> [[_1:%.*]], <8 x i32> [[_2:%.*]], <8 x i32> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmaddwev_d_wu_w(v4i64 _1, v8u32 _2, v8i32 _3) { return __builtin_lasx_xvmaddwev_d_wu_w(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwev_w_hu_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmaddwev.w.hu.h(<8 x i32> [[_1:%.*]], <16 x i16> [[_2:%.*]], <16 x i16> [[_3:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvmaddwev_w_hu_h(v8i32 _1, v16u16 _2, v16i16 _3) { return __builtin_lasx_xvmaddwev_w_hu_h(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwev_h_bu_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmaddwev.h.bu.b(<16 x i16> [[_1:%.*]], <32 x i8> [[_2:%.*]], <32 x i8> [[_3:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvmaddwev_h_bu_b(v16i16 _1, v32u8 _2, v32i8 _3) { return __builtin_lasx_xvmaddwev_h_bu_b(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwod_q_du_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmaddwod.q.du.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], <4 x i64> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmaddwod_q_du_d(v4i64 _1, v4u64 _2, v4i64 _3) { return __builtin_lasx_xvmaddwod_q_du_d(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwod_d_wu_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmaddwod.d.wu.w(<4 x i64> [[_1:%.*]], <8 x i32> [[_2:%.*]], <8 x i32> [[_3:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmaddwod_d_wu_w(v4i64 _1, v8u32 _2, v8i32 _3) { return __builtin_lasx_xvmaddwod_d_wu_w(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwod_w_hu_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvmaddwod.w.hu.h(<8 x i32> [[_1:%.*]], <16 x i16> [[_2:%.*]], <16 x i16> [[_3:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvmaddwod_w_hu_h(v8i32 _1, v16u16 _2, v16i16 _3) { return __builtin_lasx_xvmaddwod_w_hu_h(_1, _2, _3); }
// CHECK-LABEL: @xvmaddwod_h_bu_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvmaddwod.h.bu.b(<16 x i16> [[_1:%.*]], <32 x i8> [[_2:%.*]], <32 x i8> [[_3:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvmaddwod_h_bu_b(v16i16 _1, v32u8 _2, v32i8 _3) { return __builtin_lasx_xvmaddwod_h_bu_b(_1, _2, _3); }
// CHECK-LABEL: @xvrotr_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvrotr.b(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvrotr_b(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvrotr_b(_1, _2); }
// CHECK-LABEL: @xvrotr_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvrotr.h(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvrotr_h(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvrotr_h(_1, _2); }
// CHECK-LABEL: @xvrotr_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvrotr.w(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvrotr_w(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvrotr_w(_1, _2); }
// CHECK-LABEL: @xvrotr_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvrotr.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvrotr_d(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvrotr_d(_1, _2); }
// CHECK-LABEL: @xvadd_q(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvadd.q(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvadd_q(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvadd_q(_1, _2); }
// CHECK-LABEL: @xvsub_q(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsub.q(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvsub_q(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsub_q(_1, _2); }
// CHECK-LABEL: @xvaddwev_q_du_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvaddwev.q.du.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvaddwev_q_du_d(v4u64 _1, v4i64 _2) { return __builtin_lasx_xvaddwev_q_du_d(_1, _2); }
// CHECK-LABEL: @xvaddwod_q_du_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvaddwod.q.du.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvaddwod_q_du_d(v4u64 _1, v4i64 _2) { return __builtin_lasx_xvaddwod_q_du_d(_1, _2); }
// CHECK-LABEL: @xvmulwev_q_du_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmulwev.q.du.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmulwev_q_du_d(v4u64 _1, v4i64 _2) { return __builtin_lasx_xvmulwev_q_du_d(_1, _2); }
// CHECK-LABEL: @xvmulwod_q_du_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvmulwod.q.du.d(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvmulwod_q_du_d(v4u64 _1, v4i64 _2) { return __builtin_lasx_xvmulwod_q_du_d(_1, _2); }
// CHECK-LABEL: @xvmskgez_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmskgez.b(<32 x i8> [[_1:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvmskgez_b(v32i8 _1) { return __builtin_lasx_xvmskgez_b(_1); }
// CHECK-LABEL: @xvmsknz_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvmsknz.b(<32 x i8> [[_1:%.*]])
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvmsknz_b(v32i8 _1) { return __builtin_lasx_xvmsknz_b(_1); }
// CHECK-LABEL: @xvexth_h_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvexth.h.b(<32 x i8> [[_1:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvexth_h_b(v32i8 _1) { return __builtin_lasx_xvexth_h_b(_1); }
// CHECK-LABEL: @xvexth_w_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvexth.w.h(<16 x i16> [[_1:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvexth_w_h(v16i16 _1) { return __builtin_lasx_xvexth_w_h(_1); }
// CHECK-LABEL: @xvexth_d_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvexth.d.w(<8 x i32> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvexth_d_w(v8i32 _1) { return __builtin_lasx_xvexth_d_w(_1); }
// CHECK-LABEL: @xvexth_q_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvexth.q.d(<4 x i64> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvexth_q_d(v4i64 _1) { return __builtin_lasx_xvexth_q_d(_1); }
// CHECK-LABEL: @xvexth_hu_bu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvexth.hu.bu(<32 x i8> [[_1:%.*]])
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16u16 xvexth_hu_bu(v32u8 _1) { return __builtin_lasx_xvexth_hu_bu(_1); }
// CHECK-LABEL: @xvexth_wu_hu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvexth.wu.hu(<16 x i16> [[_1:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvexth_wu_hu(v16u16 _1) { return __builtin_lasx_xvexth_wu_hu(_1); }
// CHECK-LABEL: @xvexth_du_wu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvexth.du.wu(<8 x i32> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvexth_du_wu(v8u32 _1) { return __builtin_lasx_xvexth_du_wu(_1); }
// CHECK-LABEL: @xvexth_qu_du(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvexth.qu.du(<4 x i64> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvexth_qu_du(v4u64 _1) { return __builtin_lasx_xvexth_qu_du(_1); }
// CHECK-LABEL: @xvrotri_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvrotri.b(<32 x i8> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvrotri_b(v32i8 _1) { return __builtin_lasx_xvrotri_b(_1, 1); }
// CHECK-LABEL: @xvrotri_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvrotri.h(<16 x i16> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvrotri_h(v16i16 _1) { return __builtin_lasx_xvrotri_h(_1, 1); }
// CHECK-LABEL: @xvrotri_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvrotri.w(<8 x i32> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvrotri_w(v8i32 _1) { return __builtin_lasx_xvrotri_w(_1, 1); }
// CHECK-LABEL: @xvrotri_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvrotri.d(<4 x i64> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvrotri_d(v4i64 _1) { return __builtin_lasx_xvrotri_d(_1, 1); }
// CHECK-LABEL: @xvextl_q_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvextl.q.d(<4 x i64> [[_1:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvextl_q_d(v4i64 _1) { return __builtin_lasx_xvextl_q_d(_1); }
// CHECK-LABEL: @xvsrlni_b_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsrlni.b.h(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvsrlni_b_h(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvsrlni_b_h(_1, _2, 1); }
// CHECK-LABEL: @xvsrlni_h_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsrlni.h.w(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvsrlni_h_w(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsrlni_h_w(_1, _2, 1); }
// CHECK-LABEL: @xvsrlni_w_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsrlni.w.d(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvsrlni_w_d(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsrlni_w_d(_1, _2, 1); }
// CHECK-LABEL: @xvsrlni_d_q(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsrlni.d.q(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvsrlni_d_q(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsrlni_d_q(_1, _2, 1); }
// CHECK-LABEL: @xvsrlrni_b_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsrlrni.b.h(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvsrlrni_b_h(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvsrlrni_b_h(_1, _2, 1); }
// CHECK-LABEL: @xvsrlrni_h_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsrlrni.h.w(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvsrlrni_h_w(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsrlrni_h_w(_1, _2, 1); }
// CHECK-LABEL: @xvsrlrni_w_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsrlrni.w.d(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvsrlrni_w_d(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsrlrni_w_d(_1, _2, 1); }
// CHECK-LABEL: @xvsrlrni_d_q(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsrlrni.d.q(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvsrlrni_d_q(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsrlrni_d_q(_1, _2, 1); }
// CHECK-LABEL: @xvssrlni_b_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssrlni.b.h(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvssrlni_b_h(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvssrlni_b_h(_1, _2, 1); }
// CHECK-LABEL: @xvssrlni_h_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssrlni.h.w(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvssrlni_h_w(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvssrlni_h_w(_1, _2, 1); }
// CHECK-LABEL: @xvssrlni_w_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssrlni.w.d(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvssrlni_w_d(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvssrlni_w_d(_1, _2, 1); }
// CHECK-LABEL: @xvssrlni_d_q(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvssrlni.d.q(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvssrlni_d_q(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvssrlni_d_q(_1, _2, 1); }
// CHECK-LABEL: @xvssrlni_bu_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssrlni.bu.h(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvssrlni_bu_h(v32u8 _1, v32i8 _2) { return __builtin_lasx_xvssrlni_bu_h(_1, _2, 1); }
// CHECK-LABEL: @xvssrlni_hu_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssrlni.hu.w(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16u16 xvssrlni_hu_w(v16u16 _1, v16i16 _2) { return __builtin_lasx_xvssrlni_hu_w(_1, _2, 1); }
// CHECK-LABEL: @xvssrlni_wu_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssrlni.wu.d(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvssrlni_wu_d(v8u32 _1, v8i32 _2) { return __builtin_lasx_xvssrlni_wu_d(_1, _2, 1); }
// CHECK-LABEL: @xvssrlni_du_q(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvssrlni.du.q(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvssrlni_du_q(v4u64 _1, v4i64 _2) { return __builtin_lasx_xvssrlni_du_q(_1, _2, 1); }
// CHECK-LABEL: @xvssrlrni_b_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssrlrni.b.h(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvssrlrni_b_h(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvssrlrni_b_h(_1, _2, 1); }
// CHECK-LABEL: @xvssrlrni_h_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssrlrni.h.w(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvssrlrni_h_w(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvssrlrni_h_w(_1, _2, 1); }
// CHECK-LABEL: @xvssrlrni_w_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssrlrni.w.d(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvssrlrni_w_d(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvssrlrni_w_d(_1, _2, 1); }
// CHECK-LABEL: @xvssrlrni_d_q(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvssrlrni.d.q(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvssrlrni_d_q(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvssrlrni_d_q(_1, _2, 1); }
// CHECK-LABEL: @xvssrlrni_bu_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssrlrni.bu.h(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvssrlrni_bu_h(v32u8 _1, v32i8 _2) { return __builtin_lasx_xvssrlrni_bu_h(_1, _2, 1); }
// CHECK-LABEL: @xvssrlrni_hu_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssrlrni.hu.w(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16u16 xvssrlrni_hu_w(v16u16 _1, v16i16 _2) { return __builtin_lasx_xvssrlrni_hu_w(_1, _2, 1); }
// CHECK-LABEL: @xvssrlrni_wu_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssrlrni.wu.d(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvssrlrni_wu_d(v8u32 _1, v8i32 _2) { return __builtin_lasx_xvssrlrni_wu_d(_1, _2, 1); }
// CHECK-LABEL: @xvssrlrni_du_q(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvssrlrni.du.q(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvssrlrni_du_q(v4u64 _1, v4i64 _2) { return __builtin_lasx_xvssrlrni_du_q(_1, _2, 1); }
// CHECK-LABEL: @xvsrani_b_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsrani.b.h(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvsrani_b_h(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvsrani_b_h(_1, _2, 1); }
// CHECK-LABEL: @xvsrani_h_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsrani.h.w(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvsrani_h_w(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsrani_h_w(_1, _2, 1); }
// CHECK-LABEL: @xvsrani_w_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsrani.w.d(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvsrani_w_d(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsrani_w_d(_1, _2, 1); }
// CHECK-LABEL: @xvsrani_d_q(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsrani.d.q(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvsrani_d_q(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsrani_d_q(_1, _2, 1); }
// CHECK-LABEL: @xvsrarni_b_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvsrarni.b.h(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvsrarni_b_h(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvsrarni_b_h(_1, _2, 1); }
// CHECK-LABEL: @xvsrarni_h_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvsrarni.h.w(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvsrarni_h_w(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvsrarni_h_w(_1, _2, 1); }
// CHECK-LABEL: @xvsrarni_w_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvsrarni.w.d(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvsrarni_w_d(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvsrarni_w_d(_1, _2, 1); }
// CHECK-LABEL: @xvsrarni_d_q(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvsrarni.d.q(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvsrarni_d_q(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvsrarni_d_q(_1, _2, 1); }
// CHECK-LABEL: @xvssrani_b_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssrani.b.h(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvssrani_b_h(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvssrani_b_h(_1, _2, 1); }
// CHECK-LABEL: @xvssrani_h_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssrani.h.w(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvssrani_h_w(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvssrani_h_w(_1, _2, 1); }
// CHECK-LABEL: @xvssrani_w_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssrani.w.d(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvssrani_w_d(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvssrani_w_d(_1, _2, 1); }
// CHECK-LABEL: @xvssrani_d_q(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvssrani.d.q(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvssrani_d_q(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvssrani_d_q(_1, _2, 1); }
// CHECK-LABEL: @xvssrani_bu_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssrani.bu.h(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvssrani_bu_h(v32u8 _1, v32i8 _2) { return __builtin_lasx_xvssrani_bu_h(_1, _2, 1); }
// CHECK-LABEL: @xvssrani_hu_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssrani.hu.w(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16u16 xvssrani_hu_w(v16u16 _1, v16i16 _2) { return __builtin_lasx_xvssrani_hu_w(_1, _2, 1); }
// CHECK-LABEL: @xvssrani_wu_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssrani.wu.d(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvssrani_wu_d(v8u32 _1, v8i32 _2) { return __builtin_lasx_xvssrani_wu_d(_1, _2, 1); }
// CHECK-LABEL: @xvssrani_du_q(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvssrani.du.q(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvssrani_du_q(v4u64 _1, v4i64 _2) { return __builtin_lasx_xvssrani_du_q(_1, _2, 1); }
// CHECK-LABEL: @xvssrarni_b_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssrarni.b.h(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvssrarni_b_h(v32i8 _1, v32i8 _2) { return __builtin_lasx_xvssrarni_b_h(_1, _2, 1); }
// CHECK-LABEL: @xvssrarni_h_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssrarni.h.w(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvssrarni_h_w(v16i16 _1, v16i16 _2) { return __builtin_lasx_xvssrarni_h_w(_1, _2, 1); }
// CHECK-LABEL: @xvssrarni_w_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssrarni.w.d(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvssrarni_w_d(v8i32 _1, v8i32 _2) { return __builtin_lasx_xvssrarni_w_d(_1, _2, 1); }
// CHECK-LABEL: @xvssrarni_d_q(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvssrarni.d.q(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvssrarni_d_q(v4i64 _1, v4i64 _2) { return __builtin_lasx_xvssrarni_d_q(_1, _2, 1); }
// CHECK-LABEL: @xvssrarni_bu_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvssrarni.bu.h(<32 x i8> [[_1:%.*]], <32 x i8> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32u8 xvssrarni_bu_h(v32u8 _1, v32i8 _2) { return __builtin_lasx_xvssrarni_bu_h(_1, _2, 1); }
// CHECK-LABEL: @xvssrarni_hu_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvssrarni.hu.w(<16 x i16> [[_1:%.*]], <16 x i16> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16u16 xvssrarni_hu_w(v16u16 _1, v16i16 _2) { return __builtin_lasx_xvssrarni_hu_w(_1, _2, 1); }
// CHECK-LABEL: @xvssrarni_wu_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvssrarni.wu.d(<8 x i32> [[_1:%.*]], <8 x i32> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8u32 xvssrarni_wu_d(v8u32 _1, v8i32 _2) { return __builtin_lasx_xvssrarni_wu_d(_1, _2, 1); }
// CHECK-LABEL: @xvssrarni_du_q(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvssrarni.du.q(<4 x i64> [[_1:%.*]], <4 x i64> [[_2:%.*]], i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4u64 xvssrarni_du_q(v4u64 _1, v4i64 _2) { return __builtin_lasx_xvssrarni_du_q(_1, _2, 1); }
// CHECK-LABEL: @xbnz_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lasx.xbnz.b(<32 x i8> [[_1:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int xbnz_b(v32u8 _1) { return __builtin_lasx_xbnz_b(_1); }
// CHECK-LABEL: @xbnz_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lasx.xbnz.d(<4 x i64> [[_1:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int xbnz_d(v4u64 _1) { return __builtin_lasx_xbnz_d(_1); }
// CHECK-LABEL: @xbnz_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lasx.xbnz.h(<16 x i16> [[_1:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int xbnz_h(v16u16 _1) { return __builtin_lasx_xbnz_h(_1); }
// CHECK-LABEL: @xbnz_v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lasx.xbnz.v(<32 x i8> [[_1:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int xbnz_v(v32u8 _1) { return __builtin_lasx_xbnz_v(_1); }
// CHECK-LABEL: @xbnz_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lasx.xbnz.w(<8 x i32> [[_1:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int xbnz_w(v8u32 _1) { return __builtin_lasx_xbnz_w(_1); }
// CHECK-LABEL: @xbz_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lasx.xbz.b(<32 x i8> [[_1:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int xbz_b(v32u8 _1) { return __builtin_lasx_xbz_b(_1); }
// CHECK-LABEL: @xbz_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lasx.xbz.d(<4 x i64> [[_1:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int xbz_d(v4u64 _1) { return __builtin_lasx_xbz_d(_1); }
// CHECK-LABEL: @xbz_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lasx.xbz.h(<16 x i16> [[_1:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int xbz_h(v16u16 _1) { return __builtin_lasx_xbz_h(_1); }
// CHECK-LABEL: @xbz_v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lasx.xbz.v(<32 x i8> [[_1:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int xbz_v(v32u8 _1) { return __builtin_lasx_xbz_v(_1); }
// CHECK-LABEL: @xbz_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.lasx.xbz.w(<8 x i32> [[_1:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int xbz_w(v8u32 _1) { return __builtin_lasx_xbz_w(_1); }
// CHECK-LABEL: @xvfcmp_caf_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.caf.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_caf_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_caf_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_caf_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.caf.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_caf_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_caf_s(_1, _2); }
// CHECK-LABEL: @xvfcmp_ceq_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.ceq.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_ceq_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_ceq_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_ceq_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.ceq.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_ceq_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_ceq_s(_1, _2); }
// CHECK-LABEL: @xvfcmp_cle_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cle.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_cle_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_cle_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_cle_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cle.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_cle_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_cle_s(_1, _2); }
// CHECK-LABEL: @xvfcmp_clt_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.clt.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_clt_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_clt_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_clt_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.clt.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_clt_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_clt_s(_1, _2); }
// CHECK-LABEL: @xvfcmp_cne_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cne.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_cne_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_cne_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_cne_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cne.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_cne_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_cne_s(_1, _2); }
// CHECK-LABEL: @xvfcmp_cor_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cor.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_cor_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_cor_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_cor_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cor.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_cor_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_cor_s(_1, _2); }
// CHECK-LABEL: @xvfcmp_cueq_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cueq.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_cueq_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_cueq_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_cueq_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cueq.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_cueq_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_cueq_s(_1, _2); }
// CHECK-LABEL: @xvfcmp_cule_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cule.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_cule_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_cule_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_cule_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cule.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_cule_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_cule_s(_1, _2); }
// CHECK-LABEL: @xvfcmp_cult_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cult.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_cult_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_cult_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_cult_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cult.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_cult_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_cult_s(_1, _2); }
// CHECK-LABEL: @xvfcmp_cun_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cun.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_cun_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_cun_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_cune_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cune.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_cune_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_cune_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_cune_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cune.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_cune_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_cune_s(_1, _2); }
// CHECK-LABEL: @xvfcmp_cun_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cun.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_cun_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_cun_s(_1, _2); }
// CHECK-LABEL: @xvfcmp_saf_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.saf.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_saf_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_saf_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_saf_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.saf.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_saf_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_saf_s(_1, _2); }
// CHECK-LABEL: @xvfcmp_seq_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.seq.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_seq_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_seq_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_seq_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.seq.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_seq_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_seq_s(_1, _2); }
// CHECK-LABEL: @xvfcmp_sle_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sle.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_sle_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_sle_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_sle_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sle.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_sle_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_sle_s(_1, _2); }
// CHECK-LABEL: @xvfcmp_slt_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.slt.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_slt_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_slt_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_slt_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.slt.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_slt_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_slt_s(_1, _2); }
// CHECK-LABEL: @xvfcmp_sne_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sne.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvfcmp_sne_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_sne_d(_1, _2); }
// CHECK-LABEL: @xvfcmp_sne_s(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sne.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvfcmp_sne_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_sne_s(_1, _2); }
4345 // CHECK-LABEL: @xvfcmp_sor_d(
4346 // CHECK-NEXT: entry:
4347 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sor.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
4348 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
4350 v4i64 xvfcmp_sor_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_sor_d(_1, _2); }
4351 // CHECK-LABEL: @xvfcmp_sor_s(
4352 // CHECK-NEXT: entry:
4353 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sor.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
4354 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
4356 v8i32 xvfcmp_sor_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_sor_s(_1, _2); }
4357 // CHECK-LABEL: @xvfcmp_sueq_d(
4358 // CHECK-NEXT: entry:
4359 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sueq.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
4360 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
4362 v4i64 xvfcmp_sueq_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_sueq_d(_1, _2); }
4363 // CHECK-LABEL: @xvfcmp_sueq_s(
4364 // CHECK-NEXT: entry:
4365 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sueq.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
4366 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
4368 v8i32 xvfcmp_sueq_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_sueq_s(_1, _2); }
4369 // CHECK-LABEL: @xvfcmp_sule_d(
4370 // CHECK-NEXT: entry:
4371 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sule.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
4372 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
4374 v4i64 xvfcmp_sule_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_sule_d(_1, _2); }
4375 // CHECK-LABEL: @xvfcmp_sule_s(
4376 // CHECK-NEXT: entry:
4377 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sule.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
4378 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
4380 v8i32 xvfcmp_sule_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_sule_s(_1, _2); }
4381 // CHECK-LABEL: @xvfcmp_sult_d(
4382 // CHECK-NEXT: entry:
4383 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sult.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
4384 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
4386 v4i64 xvfcmp_sult_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_sult_d(_1, _2); }
4387 // CHECK-LABEL: @xvfcmp_sult_s(
4388 // CHECK-NEXT: entry:
4389 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sult.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
4390 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
4392 v8i32 xvfcmp_sult_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_sult_s(_1, _2); }
4393 // CHECK-LABEL: @xvfcmp_sun_d(
4394 // CHECK-NEXT: entry:
4395 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sun.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
4396 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
4398 v4i64 xvfcmp_sun_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_sun_d(_1, _2); }
4399 // CHECK-LABEL: @xvfcmp_sune_d(
4400 // CHECK-NEXT: entry:
4401 // CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sune.d(<4 x double> [[_1:%.*]], <4 x double> [[_2:%.*]])
4402 // CHECK-NEXT: ret <4 x i64> [[TMP0]]
4404 v4i64 xvfcmp_sune_d(v4f64 _1, v4f64 _2) { return __builtin_lasx_xvfcmp_sune_d(_1, _2); }
4405 // CHECK-LABEL: @xvfcmp_sune_s(
4406 // CHECK-NEXT: entry:
4407 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sune.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
4408 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
4410 v8i32 xvfcmp_sune_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_sune_s(_1, _2); }
4411 // CHECK-LABEL: @xvfcmp_sun_s(
4412 // CHECK-NEXT: entry:
4413 // CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sun.s(<8 x float> [[_1:%.*]], <8 x float> [[_2:%.*]])
4414 // CHECK-NEXT: ret <8 x i32> [[TMP0]]
4416 v8i32 xvfcmp_sun_s(v8f32 _1, v8f32 _2) { return __builtin_lasx_xvfcmp_sun_s(_1, _2); }
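// xvpickve.{d,w}.f takes an immediate lane index; the checks below only pin
// down that the builtin lowers to the matching LASX intrinsic call with the
// immediate passed through as i32, not the element-selection semantics.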
// CHECK-LABEL: @xvpickve_d_f(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x double> @llvm.loongarch.lasx.xvpickve.d.f(<4 x double> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <4 x double> [[TMP0]]
//
v4f64 xvpickve_d_f(v4f64 _1) { return __builtin_lasx_xvpickve_d_f(_1, 1); }
// CHECK-LABEL: @xvpickve_w_f(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.loongarch.lasx.xvpickve.w.f(<8 x float> [[_1:%.*]], i32 1)
// CHECK-NEXT: ret <8 x float> [[TMP0]]
//
v8f32 xvpickve_w_f(v8f32 _1) { return __builtin_lasx_xvpickve_w_f(_1, 1); }
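// xvrepli.{b,h,w,d} takes no vector operand at all: it splats a signed
// immediate across every lane, so the wrappers below are parameterless and
// the intrinsic is called with only an i32 immediate.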
// CHECK-LABEL: @xvrepli_b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <32 x i8> @llvm.loongarch.lasx.xvrepli.b(i32 1)
// CHECK-NEXT: ret <32 x i8> [[TMP0]]
//
v32i8 xvrepli_b() { return __builtin_lasx_xvrepli_b(1); }
// CHECK-LABEL: @xvrepli_d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i64> @llvm.loongarch.lasx.xvrepli.d(i32 1)
// CHECK-NEXT: ret <4 x i64> [[TMP0]]
//
v4i64 xvrepli_d() { return __builtin_lasx_xvrepli_d(1); }
// CHECK-LABEL: @xvrepli_h(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.loongarch.lasx.xvrepli.h(i32 1)
// CHECK-NEXT: ret <16 x i16> [[TMP0]]
//
v16i16 xvrepli_h() { return __builtin_lasx_xvrepli_h(1); }
// CHECK-LABEL: @xvrepli_w(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <8 x i32> @llvm.loongarch.lasx.xvrepli.w(i32 1)
// CHECK-NEXT: ret <8 x i32> [[TMP0]]
//
v8i32 xvrepli_w() { return __builtin_lasx_xvrepli_w(1); }