// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
// RUN: -disable-O0-optnone -emit-llvm -o - %s \
// RUN: | opt -S -passes=mem2reg | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_neon.h>

// CHECK-LABEL: @test_vceqz_s8(
// CHECK: [[TMP0:%.*]] = icmp eq <8 x i8> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <8 x i1> [[TMP0]] to <8 x i8>
// CHECK: ret <8 x i8> [[VCEQZ_I]]
uint8x8_t test_vceqz_s8(int8x8_t a) {
  return vceqz_s8(a);
}

// CHECK-LABEL: @test_vceqz_s16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp eq <4 x i16> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[VCEQZ_I]]
uint16x4_t test_vceqz_s16(int16x4_t a) {
  return vceqz_s16(a);
}

// CHECK-LABEL: @test_vceqz_s32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp eq <2 x i32> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32>
// CHECK: ret <2 x i32> [[VCEQZ_I]]
uint32x2_t test_vceqz_s32(int32x2_t a) {
  return vceqz_s32(a);
}

// CHECK-LABEL: @test_vceqz_s64(
// CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp eq <1 x i64> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <1 x i1> [[TMP1]] to <1 x i64>
// CHECK: ret <1 x i64> [[VCEQZ_I]]
uint64x1_t test_vceqz_s64(int64x1_t a) {
  return vceqz_s64(a);
}

// CHECK-LABEL: @test_vceqz_u64(
// CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp eq <1 x i64> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <1 x i1> [[TMP1]] to <1 x i64>
// CHECK: ret <1 x i64> [[VCEQZ_I]]
uint64x1_t test_vceqz_u64(uint64x1_t a) {
  return vceqz_u64(a);
}

// CHECK-LABEL: @test_vceqz_p64(
// CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp eq <1 x i64> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <1 x i1> [[TMP1]] to <1 x i64>
// CHECK: ret <1 x i64> [[VCEQZ_I]]
uint64x1_t test_vceqz_p64(poly64x1_t a) {
  return vceqz_p64(a);
}

// CHECK-LABEL: @test_vceqzq_s8(
// CHECK: [[TMP0:%.*]] = icmp eq <16 x i8> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <16 x i1> [[TMP0]] to <16 x i8>
// CHECK: ret <16 x i8> [[VCEQZ_I]]
uint8x16_t test_vceqzq_s8(int8x16_t a) {
  return vceqzq_s8(a);
}

// CHECK-LABEL: @test_vceqzq_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp eq <8 x i16> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[VCEQZ_I]]
uint16x8_t test_vceqzq_s16(int16x8_t a) {
  return vceqzq_s16(a);
}

// CHECK-LABEL: @test_vceqzq_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp eq <4 x i32> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
// CHECK: ret <4 x i32> [[VCEQZ_I]]
uint32x4_t test_vceqzq_s32(int32x4_t a) {
  return vceqzq_s32(a);
}

// CHECK-LABEL: @test_vceqzq_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp eq <2 x i64> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
// CHECK: ret <2 x i64> [[VCEQZ_I]]
uint64x2_t test_vceqzq_s64(int64x2_t a) {
  return vceqzq_s64(a);
}

// CHECK-LABEL: @test_vceqz_u8(
// CHECK: [[TMP0:%.*]] = icmp eq <8 x i8> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <8 x i1> [[TMP0]] to <8 x i8>
// CHECK: ret <8 x i8> [[VCEQZ_I]]
uint8x8_t test_vceqz_u8(uint8x8_t a) {
  return vceqz_u8(a);
}

// CHECK-LABEL: @test_vceqz_u16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp eq <4 x i16> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[VCEQZ_I]]
uint16x4_t test_vceqz_u16(uint16x4_t a) {
  return vceqz_u16(a);
}

// CHECK-LABEL: @test_vceqz_u32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp eq <2 x i32> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32>
// CHECK: ret <2 x i32> [[VCEQZ_I]]
uint32x2_t test_vceqz_u32(uint32x2_t a) {
  return vceqz_u32(a);
}

// CHECK-LABEL: @test_vceqzq_u8(
// CHECK: [[TMP0:%.*]] = icmp eq <16 x i8> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <16 x i1> [[TMP0]] to <16 x i8>
// CHECK: ret <16 x i8> [[VCEQZ_I]]
uint8x16_t test_vceqzq_u8(uint8x16_t a) {
  return vceqzq_u8(a);
}

// CHECK-LABEL: @test_vceqzq_u16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp eq <8 x i16> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[VCEQZ_I]]
uint16x8_t test_vceqzq_u16(uint16x8_t a) {
  return vceqzq_u16(a);
}

// CHECK-LABEL: @test_vceqzq_u32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp eq <4 x i32> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
// CHECK: ret <4 x i32> [[VCEQZ_I]]
uint32x4_t test_vceqzq_u32(uint32x4_t a) {
  return vceqzq_u32(a);
}

// CHECK-LABEL: @test_vceqzq_u64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp eq <2 x i64> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
// CHECK: ret <2 x i64> [[VCEQZ_I]]
uint64x2_t test_vceqzq_u64(uint64x2_t a) {
  return vceqzq_u64(a);
}

// CHECK-LABEL: @test_vceqz_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = fcmp oeq <2 x float> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32>
// CHECK: ret <2 x i32> [[VCEQZ_I]]
uint32x2_t test_vceqz_f32(float32x2_t a) {
  return vceqz_f32(a);
}

// CHECK-LABEL: @test_vceqz_f64(
// CHECK: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = fcmp oeq <1 x double> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <1 x i1> [[TMP1]] to <1 x i64>
// CHECK: ret <1 x i64> [[VCEQZ_I]]
uint64x1_t test_vceqz_f64(float64x1_t a) {
  return vceqz_f64(a);
}

// CHECK-LABEL: @test_vceqzq_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = fcmp oeq <4 x float> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
// CHECK: ret <4 x i32> [[VCEQZ_I]]
uint32x4_t test_vceqzq_f32(float32x4_t a) {
  return vceqzq_f32(a);
}

// CHECK-LABEL: @test_vceqz_p8(
// CHECK: [[TMP0:%.*]] = icmp eq <8 x i8> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <8 x i1> [[TMP0]] to <8 x i8>
// CHECK: ret <8 x i8> [[VCEQZ_I]]
uint8x8_t test_vceqz_p8(poly8x8_t a) {
  return vceqz_p8(a);
}

// CHECK-LABEL: @test_vceqzq_p8(
// CHECK: [[TMP0:%.*]] = icmp eq <16 x i8> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <16 x i1> [[TMP0]] to <16 x i8>
// CHECK: ret <16 x i8> [[VCEQZ_I]]
uint8x16_t test_vceqzq_p8(poly8x16_t a) {
  return vceqzq_p8(a);
}

// CHECK-LABEL: @test_vceqzq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = fcmp oeq <2 x double> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
// CHECK: ret <2 x i64> [[VCEQZ_I]]
uint64x2_t test_vceqzq_f64(float64x2_t a) {
  return vceqzq_f64(a);
}

// CHECK-LABEL: @test_vceqzq_p64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp eq <2 x i64> %a, zeroinitializer
// CHECK: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
// CHECK: ret <2 x i64> [[VCEQZ_I]]
uint64x2_t test_vceqzq_p64(poly64x2_t a) {
  return vceqzq_p64(a);
}

// CHECK-LABEL: @test_vcgez_s8(
// CHECK: [[TMP0:%.*]] = icmp sge <8 x i8> %a, zeroinitializer
// CHECK: [[VCGEZ_I:%.*]] = sext <8 x i1> [[TMP0]] to <8 x i8>
// CHECK: ret <8 x i8> [[VCGEZ_I]]
uint8x8_t test_vcgez_s8(int8x8_t a) {
  return vcgez_s8(a);
}

// CHECK-LABEL: @test_vcgez_s16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp sge <4 x i16> %a, zeroinitializer
// CHECK: [[VCGEZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[VCGEZ_I]]
uint16x4_t test_vcgez_s16(int16x4_t a) {
  return vcgez_s16(a);
}

// CHECK-LABEL: @test_vcgez_s32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp sge <2 x i32> %a, zeroinitializer
// CHECK: [[VCGEZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32>
// CHECK: ret <2 x i32> [[VCGEZ_I]]
uint32x2_t test_vcgez_s32(int32x2_t a) {
  return vcgez_s32(a);
}

// CHECK-LABEL: @test_vcgez_s64(
// CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp sge <1 x i64> %a, zeroinitializer
// CHECK: [[VCGEZ_I:%.*]] = sext <1 x i1> [[TMP1]] to <1 x i64>
// CHECK: ret <1 x i64> [[VCGEZ_I]]
uint64x1_t test_vcgez_s64(int64x1_t a) {
  return vcgez_s64(a);
}

// CHECK-LABEL: @test_vcgezq_s8(
// CHECK: [[TMP0:%.*]] = icmp sge <16 x i8> %a, zeroinitializer
// CHECK: [[VCGEZ_I:%.*]] = sext <16 x i1> [[TMP0]] to <16 x i8>
// CHECK: ret <16 x i8> [[VCGEZ_I]]
uint8x16_t test_vcgezq_s8(int8x16_t a) {
  return vcgezq_s8(a);
}

// CHECK-LABEL: @test_vcgezq_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp sge <8 x i16> %a, zeroinitializer
// CHECK: [[VCGEZ_I:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[VCGEZ_I]]
uint16x8_t test_vcgezq_s16(int16x8_t a) {
  return vcgezq_s16(a);
}

// CHECK-LABEL: @test_vcgezq_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp sge <4 x i32> %a, zeroinitializer
// CHECK: [[VCGEZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
// CHECK: ret <4 x i32> [[VCGEZ_I]]
uint32x4_t test_vcgezq_s32(int32x4_t a) {
  return vcgezq_s32(a);
}

// CHECK-LABEL: @test_vcgezq_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp sge <2 x i64> %a, zeroinitializer
// CHECK: [[VCGEZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
// CHECK: ret <2 x i64> [[VCGEZ_I]]
uint64x2_t test_vcgezq_s64(int64x2_t a) {
  return vcgezq_s64(a);
}

// CHECK-LABEL: @test_vcgez_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = fcmp oge <2 x float> %a, zeroinitializer
// CHECK: [[VCGEZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32>
// CHECK: ret <2 x i32> [[VCGEZ_I]]
uint32x2_t test_vcgez_f32(float32x2_t a) {
  return vcgez_f32(a);
}

// CHECK-LABEL: @test_vcgez_f64(
// CHECK: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = fcmp oge <1 x double> %a, zeroinitializer
// CHECK: [[VCGEZ_I:%.*]] = sext <1 x i1> [[TMP1]] to <1 x i64>
// CHECK: ret <1 x i64> [[VCGEZ_I]]
uint64x1_t test_vcgez_f64(float64x1_t a) {
  return vcgez_f64(a);
}

// CHECK-LABEL: @test_vcgezq_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = fcmp oge <4 x float> %a, zeroinitializer
// CHECK: [[VCGEZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
// CHECK: ret <4 x i32> [[VCGEZ_I]]
uint32x4_t test_vcgezq_f32(float32x4_t a) {
  return vcgezq_f32(a);
}

// CHECK-LABEL: @test_vcgezq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = fcmp oge <2 x double> %a, zeroinitializer
// CHECK: [[VCGEZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
// CHECK: ret <2 x i64> [[VCGEZ_I]]
uint64x2_t test_vcgezq_f64(float64x2_t a) {
  return vcgezq_f64(a);
}

// CHECK-LABEL: @test_vclez_s8(
// CHECK: [[TMP0:%.*]] = icmp sle <8 x i8> %a, zeroinitializer
// CHECK: [[VCLEZ_I:%.*]] = sext <8 x i1> [[TMP0]] to <8 x i8>
// CHECK: ret <8 x i8> [[VCLEZ_I]]
uint8x8_t test_vclez_s8(int8x8_t a) {
  return vclez_s8(a);
}

// CHECK-LABEL: @test_vclez_s16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp sle <4 x i16> %a, zeroinitializer
// CHECK: [[VCLEZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[VCLEZ_I]]
uint16x4_t test_vclez_s16(int16x4_t a) {
  return vclez_s16(a);
}

// CHECK-LABEL: @test_vclez_s32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp sle <2 x i32> %a, zeroinitializer
// CHECK: [[VCLEZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32>
// CHECK: ret <2 x i32> [[VCLEZ_I]]
uint32x2_t test_vclez_s32(int32x2_t a) {
  return vclez_s32(a);
}

// CHECK-LABEL: @test_vclez_s64(
// CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp sle <1 x i64> %a, zeroinitializer
// CHECK: [[VCLEZ_I:%.*]] = sext <1 x i1> [[TMP1]] to <1 x i64>
// CHECK: ret <1 x i64> [[VCLEZ_I]]
uint64x1_t test_vclez_s64(int64x1_t a) {
  return vclez_s64(a);
}

// CHECK-LABEL: @test_vclezq_s8(
// CHECK: [[TMP0:%.*]] = icmp sle <16 x i8> %a, zeroinitializer
// CHECK: [[VCLEZ_I:%.*]] = sext <16 x i1> [[TMP0]] to <16 x i8>
// CHECK: ret <16 x i8> [[VCLEZ_I]]
uint8x16_t test_vclezq_s8(int8x16_t a) {
  return vclezq_s8(a);
}

// CHECK-LABEL: @test_vclezq_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp sle <8 x i16> %a, zeroinitializer
// CHECK: [[VCLEZ_I:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[VCLEZ_I]]
uint16x8_t test_vclezq_s16(int16x8_t a) {
  return vclezq_s16(a);
}

// CHECK-LABEL: @test_vclezq_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp sle <4 x i32> %a, zeroinitializer
// CHECK: [[VCLEZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
// CHECK: ret <4 x i32> [[VCLEZ_I]]
uint32x4_t test_vclezq_s32(int32x4_t a) {
  return vclezq_s32(a);
}

// CHECK-LABEL: @test_vclezq_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp sle <2 x i64> %a, zeroinitializer
// CHECK: [[VCLEZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
// CHECK: ret <2 x i64> [[VCLEZ_I]]
uint64x2_t test_vclezq_s64(int64x2_t a) {
  return vclezq_s64(a);
}

// CHECK-LABEL: @test_vclez_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = fcmp ole <2 x float> %a, zeroinitializer
// CHECK: [[VCLEZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32>
// CHECK: ret <2 x i32> [[VCLEZ_I]]
uint32x2_t test_vclez_f32(float32x2_t a) {
  return vclez_f32(a);
}

// CHECK-LABEL: @test_vclez_f64(
// CHECK: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = fcmp ole <1 x double> %a, zeroinitializer
// CHECK: [[VCLEZ_I:%.*]] = sext <1 x i1> [[TMP1]] to <1 x i64>
// CHECK: ret <1 x i64> [[VCLEZ_I]]
uint64x1_t test_vclez_f64(float64x1_t a) {
  return vclez_f64(a);
}

// CHECK-LABEL: @test_vclezq_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = fcmp ole <4 x float> %a, zeroinitializer
// CHECK: [[VCLEZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
// CHECK: ret <4 x i32> [[VCLEZ_I]]
uint32x4_t test_vclezq_f32(float32x4_t a) {
  return vclezq_f32(a);
}

// CHECK-LABEL: @test_vclezq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = fcmp ole <2 x double> %a, zeroinitializer
// CHECK: [[VCLEZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
// CHECK: ret <2 x i64> [[VCLEZ_I]]
uint64x2_t test_vclezq_f64(float64x2_t a) {
  return vclezq_f64(a);
}

// CHECK-LABEL: @test_vcgtz_s8(
// CHECK: [[TMP0:%.*]] = icmp sgt <8 x i8> %a, zeroinitializer
// CHECK: [[VCGTZ_I:%.*]] = sext <8 x i1> [[TMP0]] to <8 x i8>
// CHECK: ret <8 x i8> [[VCGTZ_I]]
uint8x8_t test_vcgtz_s8(int8x8_t a) {
  return vcgtz_s8(a);
}

// CHECK-LABEL: @test_vcgtz_s16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp sgt <4 x i16> %a, zeroinitializer
// CHECK: [[VCGTZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[VCGTZ_I]]
uint16x4_t test_vcgtz_s16(int16x4_t a) {
  return vcgtz_s16(a);
}

// CHECK-LABEL: @test_vcgtz_s32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp sgt <2 x i32> %a, zeroinitializer
// CHECK: [[VCGTZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32>
// CHECK: ret <2 x i32> [[VCGTZ_I]]
uint32x2_t test_vcgtz_s32(int32x2_t a) {
  return vcgtz_s32(a);
}

// CHECK-LABEL: @test_vcgtz_s64(
// CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp sgt <1 x i64> %a, zeroinitializer
// CHECK: [[VCGTZ_I:%.*]] = sext <1 x i1> [[TMP1]] to <1 x i64>
// CHECK: ret <1 x i64> [[VCGTZ_I]]
uint64x1_t test_vcgtz_s64(int64x1_t a) {
  return vcgtz_s64(a);
}

// CHECK-LABEL: @test_vcgtzq_s8(
// CHECK: [[TMP0:%.*]] = icmp sgt <16 x i8> %a, zeroinitializer
// CHECK: [[VCGTZ_I:%.*]] = sext <16 x i1> [[TMP0]] to <16 x i8>
// CHECK: ret <16 x i8> [[VCGTZ_I]]
uint8x16_t test_vcgtzq_s8(int8x16_t a) {
  return vcgtzq_s8(a);
}

// CHECK-LABEL: @test_vcgtzq_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp sgt <8 x i16> %a, zeroinitializer
// CHECK: [[VCGTZ_I:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[VCGTZ_I]]
uint16x8_t test_vcgtzq_s16(int16x8_t a) {
  return vcgtzq_s16(a);
}

// CHECK-LABEL: @test_vcgtzq_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp sgt <4 x i32> %a, zeroinitializer
// CHECK: [[VCGTZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
// CHECK: ret <4 x i32> [[VCGTZ_I]]
uint32x4_t test_vcgtzq_s32(int32x4_t a) {
  return vcgtzq_s32(a);
}

// CHECK-LABEL: @test_vcgtzq_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp sgt <2 x i64> %a, zeroinitializer
// CHECK: [[VCGTZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
// CHECK: ret <2 x i64> [[VCGTZ_I]]
uint64x2_t test_vcgtzq_s64(int64x2_t a) {
  return vcgtzq_s64(a);
}

// CHECK-LABEL: @test_vcgtz_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = fcmp ogt <2 x float> %a, zeroinitializer
// CHECK: [[VCGTZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32>
// CHECK: ret <2 x i32> [[VCGTZ_I]]
uint32x2_t test_vcgtz_f32(float32x2_t a) {
  return vcgtz_f32(a);
}

// CHECK-LABEL: @test_vcgtz_f64(
// CHECK: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = fcmp ogt <1 x double> %a, zeroinitializer
// CHECK: [[VCGTZ_I:%.*]] = sext <1 x i1> [[TMP1]] to <1 x i64>
// CHECK: ret <1 x i64> [[VCGTZ_I]]
uint64x1_t test_vcgtz_f64(float64x1_t a) {
  return vcgtz_f64(a);
}

// CHECK-LABEL: @test_vcgtzq_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = fcmp ogt <4 x float> %a, zeroinitializer
// CHECK: [[VCGTZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
// CHECK: ret <4 x i32> [[VCGTZ_I]]
uint32x4_t test_vcgtzq_f32(float32x4_t a) {
  return vcgtzq_f32(a);
}

// CHECK-LABEL: @test_vcgtzq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = fcmp ogt <2 x double> %a, zeroinitializer
// CHECK: [[VCGTZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
// CHECK: ret <2 x i64> [[VCGTZ_I]]
uint64x2_t test_vcgtzq_f64(float64x2_t a) {
  return vcgtzq_f64(a);
}

// CHECK-LABEL: @test_vcltz_s8(
// CHECK: [[TMP0:%.*]] = icmp slt <8 x i8> %a, zeroinitializer
// CHECK: [[VCLTZ_I:%.*]] = sext <8 x i1> [[TMP0]] to <8 x i8>
// CHECK: ret <8 x i8> [[VCLTZ_I]]
uint8x8_t test_vcltz_s8(int8x8_t a) {
  return vcltz_s8(a);
}

// CHECK-LABEL: @test_vcltz_s16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp slt <4 x i16> %a, zeroinitializer
// CHECK: [[VCLTZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[VCLTZ_I]]
uint16x4_t test_vcltz_s16(int16x4_t a) {
  return vcltz_s16(a);
}

// CHECK-LABEL: @test_vcltz_s32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp slt <2 x i32> %a, zeroinitializer
// CHECK: [[VCLTZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32>
// CHECK: ret <2 x i32> [[VCLTZ_I]]
uint32x2_t test_vcltz_s32(int32x2_t a) {
  return vcltz_s32(a);
}

// CHECK-LABEL: @test_vcltz_s64(
// CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = icmp slt <1 x i64> %a, zeroinitializer
// CHECK: [[VCLTZ_I:%.*]] = sext <1 x i1> [[TMP1]] to <1 x i64>
// CHECK: ret <1 x i64> [[VCLTZ_I]]
uint64x1_t test_vcltz_s64(int64x1_t a) {
  return vcltz_s64(a);
}

// CHECK-LABEL: @test_vcltzq_s8(
// CHECK: [[TMP0:%.*]] = icmp slt <16 x i8> %a, zeroinitializer
// CHECK: [[VCLTZ_I:%.*]] = sext <16 x i1> [[TMP0]] to <16 x i8>
// CHECK: ret <16 x i8> [[VCLTZ_I]]
uint8x16_t test_vcltzq_s8(int8x16_t a) {
  return vcltzq_s8(a);
}

// CHECK-LABEL: @test_vcltzq_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp slt <8 x i16> %a, zeroinitializer
// CHECK: [[VCLTZ_I:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[VCLTZ_I]]
uint16x8_t test_vcltzq_s16(int16x8_t a) {
  return vcltzq_s16(a);
}

// CHECK-LABEL: @test_vcltzq_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp slt <4 x i32> %a, zeroinitializer
// CHECK: [[VCLTZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
// CHECK: ret <4 x i32> [[VCLTZ_I]]
uint32x4_t test_vcltzq_s32(int32x4_t a) {
  return vcltzq_s32(a);
}

// CHECK-LABEL: @test_vcltzq_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = icmp slt <2 x i64> %a, zeroinitializer
// CHECK: [[VCLTZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
// CHECK: ret <2 x i64> [[VCLTZ_I]]
uint64x2_t test_vcltzq_s64(int64x2_t a) {
  return vcltzq_s64(a);
}

// CHECK-LABEL: @test_vcltz_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = fcmp olt <2 x float> %a, zeroinitializer
// CHECK: [[VCLTZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32>
// CHECK: ret <2 x i32> [[VCLTZ_I]]
uint32x2_t test_vcltz_f32(float32x2_t a) {
  return vcltz_f32(a);
}

// CHECK-LABEL: @test_vcltz_f64(
// CHECK: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = fcmp olt <1 x double> %a, zeroinitializer
// CHECK: [[VCLTZ_I:%.*]] = sext <1 x i1> [[TMP1]] to <1 x i64>
// CHECK: ret <1 x i64> [[VCLTZ_I]]
uint64x1_t test_vcltz_f64(float64x1_t a) {
  return vcltz_f64(a);
}

// CHECK-LABEL: @test_vcltzq_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = fcmp olt <4 x float> %a, zeroinitializer
// CHECK: [[VCLTZ_I:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
// CHECK: ret <4 x i32> [[VCLTZ_I]]
uint32x4_t test_vcltzq_f32(float32x4_t a) {
  return vcltzq_f32(a);
}

// CHECK-LABEL: @test_vcltzq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = fcmp olt <2 x double> %a, zeroinitializer
// CHECK: [[VCLTZ_I:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
// CHECK: ret <2 x i64> [[VCLTZ_I]]
uint64x2_t test_vcltzq_f64(float64x2_t a) {
  return vcltzq_f64(a);
}

// CHECK-LABEL: @test_vrev16_s8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
// CHECK: ret <8 x i8> [[SHUFFLE_I]]
int8x8_t test_vrev16_s8(int8x8_t a) {
  return vrev16_s8(a);
}

// CHECK-LABEL: @test_vrev16_u8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
// CHECK: ret <8 x i8> [[SHUFFLE_I]]
uint8x8_t test_vrev16_u8(uint8x8_t a) {
  return vrev16_u8(a);
}

// CHECK-LABEL: @test_vrev16_p8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
// CHECK: ret <8 x i8> [[SHUFFLE_I]]
poly8x8_t test_vrev16_p8(poly8x8_t a) {
  return vrev16_p8(a);
}

// CHECK-LABEL: @test_vrev16q_s8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
// CHECK: ret <16 x i8> [[SHUFFLE_I]]
int8x16_t test_vrev16q_s8(int8x16_t a) {
  return vrev16q_s8(a);
}

// CHECK-LABEL: @test_vrev16q_u8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
// CHECK: ret <16 x i8> [[SHUFFLE_I]]
uint8x16_t test_vrev16q_u8(uint8x16_t a) {
  return vrev16q_u8(a);
}

// CHECK-LABEL: @test_vrev16q_p8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
// CHECK: ret <16 x i8> [[SHUFFLE_I]]
poly8x16_t test_vrev16q_p8(poly8x16_t a) {
  return vrev16q_p8(a);
}

// CHECK-LABEL: @test_vrev32_s8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK: ret <8 x i8> [[SHUFFLE_I]]
int8x8_t test_vrev32_s8(int8x8_t a) {
  return vrev32_s8(a);
}

// CHECK-LABEL: @test_vrev32_s16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK: ret <4 x i16> [[SHUFFLE_I]]
int16x4_t test_vrev32_s16(int16x4_t a) {
  return vrev32_s16(a);
}

// CHECK-LABEL: @test_vrev32_u8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK: ret <8 x i8> [[SHUFFLE_I]]
uint8x8_t test_vrev32_u8(uint8x8_t a) {
  return vrev32_u8(a);
}

// CHECK-LABEL: @test_vrev32_u16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK: ret <4 x i16> [[SHUFFLE_I]]
uint16x4_t test_vrev32_u16(uint16x4_t a) {
  return vrev32_u16(a);
}

// CHECK-LABEL: @test_vrev32_p8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK: ret <8 x i8> [[SHUFFLE_I]]
poly8x8_t test_vrev32_p8(poly8x8_t a) {
  return vrev32_p8(a);
}

// CHECK-LABEL: @test_vrev32_p16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK: ret <4 x i16> [[SHUFFLE_I]]
poly16x4_t test_vrev32_p16(poly16x4_t a) {
  return vrev32_p16(a);
}

// CHECK-LABEL: @test_vrev32q_s8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
// CHECK: ret <16 x i8> [[SHUFFLE_I]]
int8x16_t test_vrev32q_s8(int8x16_t a) {
  return vrev32q_s8(a);
}

// CHECK-LABEL: @test_vrev32q_s16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
// CHECK: ret <8 x i16> [[SHUFFLE_I]]
int16x8_t test_vrev32q_s16(int16x8_t a) {
  return vrev32q_s16(a);
}

// CHECK-LABEL: @test_vrev32q_u8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
// CHECK: ret <16 x i8> [[SHUFFLE_I]]
uint8x16_t test_vrev32q_u8(uint8x16_t a) {
  return vrev32q_u8(a);
}

// CHECK-LABEL: @test_vrev32q_u16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
// CHECK: ret <8 x i16> [[SHUFFLE_I]]
uint16x8_t test_vrev32q_u16(uint16x8_t a) {
  return vrev32q_u16(a);
}

// CHECK-LABEL: @test_vrev32q_p8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
// CHECK: ret <16 x i8> [[SHUFFLE_I]]
poly8x16_t test_vrev32q_p8(poly8x16_t a) {
  return vrev32q_p8(a);
}

// CHECK-LABEL: @test_vrev32q_p16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
// CHECK: ret <8 x i16> [[SHUFFLE_I]]
poly16x8_t test_vrev32q_p16(poly16x8_t a) {
  return vrev32q_p16(a);
}

// CHECK-LABEL: @test_vrev64_s8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
// CHECK: ret <8 x i8> [[SHUFFLE_I]]
int8x8_t test_vrev64_s8(int8x8_t a) {
  return vrev64_s8(a);
}

// CHECK-LABEL: @test_vrev64_s16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// CHECK: ret <4 x i16> [[SHUFFLE_I]]
int16x4_t test_vrev64_s16(int16x4_t a) {
  return vrev64_s16(a);
}

// CHECK-LABEL: @test_vrev64_s32(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %a, <2 x i32> <i32 1, i32 0>
// CHECK: ret <2 x i32> [[SHUFFLE_I]]
int32x2_t test_vrev64_s32(int32x2_t a) {
  return vrev64_s32(a);
}

// CHECK-LABEL: @test_vrev64_u8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
// CHECK: ret <8 x i8> [[SHUFFLE_I]]
uint8x8_t test_vrev64_u8(uint8x8_t a) {
  return vrev64_u8(a);
}

// CHECK-LABEL: @test_vrev64_u16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// CHECK: ret <4 x i16> [[SHUFFLE_I]]
uint16x4_t test_vrev64_u16(uint16x4_t a) {
  return vrev64_u16(a);
}

// CHECK-LABEL: @test_vrev64_u32(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %a, <2 x i32> <i32 1, i32 0>
// CHECK: ret <2 x i32> [[SHUFFLE_I]]
uint32x2_t test_vrev64_u32(uint32x2_t a) {
  return vrev64_u32(a);
}

// CHECK-LABEL: @test_vrev64_p8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
// CHECK: ret <8 x i8> [[SHUFFLE_I]]
poly8x8_t test_vrev64_p8(poly8x8_t a) {
  return vrev64_p8(a);
}

// CHECK-LABEL: @test_vrev64_p16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// CHECK: ret <4 x i16> [[SHUFFLE_I]]
poly16x4_t test_vrev64_p16(poly16x4_t a) {
  return vrev64_p16(a);
}

// CHECK-LABEL: @test_vrev64_f32(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x float> %a, <2 x float> %a, <2 x i32> <i32 1, i32 0>
// CHECK: ret <2 x float> [[SHUFFLE_I]]
float32x2_t test_vrev64_f32(float32x2_t a) {
  return vrev64_f32(a);
}

// CHECK-LABEL: @test_vrev64q_s8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
// CHECK: ret <16 x i8> [[SHUFFLE_I]]
int8x16_t test_vrev64q_s8(int8x16_t a) {
  return vrev64q_s8(a);
}

// CHECK-LABEL: @test_vrev64q_s16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK: ret <8 x i16> [[SHUFFLE_I]]
int16x8_t test_vrev64q_s16(int16x8_t a) {
  return vrev64q_s16(a);
}

// CHECK-LABEL: @test_vrev64q_s32(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK: ret <4 x i32> [[SHUFFLE_I]]
int32x4_t test_vrev64q_s32(int32x4_t a) {
  return vrev64q_s32(a);
}

// CHECK-LABEL: @test_vrev64q_u8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
// CHECK: ret <16 x i8> [[SHUFFLE_I]]
uint8x16_t test_vrev64q_u8(uint8x16_t a) {
  return vrev64q_u8(a);
}

// CHECK-LABEL: @test_vrev64q_u16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK: ret <8 x i16> [[SHUFFLE_I]]
uint16x8_t test_vrev64q_u16(uint16x8_t a) {
  return vrev64q_u16(a);
}

// CHECK-LABEL: @test_vrev64q_u32(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK: ret <4 x i32> [[SHUFFLE_I]]
uint32x4_t test_vrev64q_u32(uint32x4_t a) {
  return vrev64q_u32(a);
}

// CHECK-LABEL: @test_vrev64q_p8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
// CHECK: ret <16 x i8> [[SHUFFLE_I]]
poly8x16_t test_vrev64q_p8(poly8x16_t a) {
  return vrev64q_p8(a);
}

// CHECK-LABEL: @test_vrev64q_p16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK: ret <8 x i16> [[SHUFFLE_I]]
poly16x8_t test_vrev64q_p16(poly16x8_t a) {
  return vrev64q_p16(a);
}

// CHECK-LABEL: @test_vrev64q_f32(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %a, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK: ret <4 x float> [[SHUFFLE_I]]
float32x4_t test_vrev64q_f32(float32x4_t a) {
  return vrev64q_f32(a);
}

// CHECK-LABEL: @test_vpaddl_s8(
// CHECK: [[VPADDL_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> %a)
// CHECK: ret <4 x i16> [[VPADDL_I]]
int16x4_t test_vpaddl_s8(int8x8_t a) {
  return vpaddl_s8(a);
}

// CHECK-LABEL: @test_vpaddl_s16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[VPADDL1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16> %a)
// CHECK: ret <2 x i32> [[VPADDL1_I]]
int32x2_t test_vpaddl_s16(int16x4_t a) {
  return vpaddl_s16(a);
}

// CHECK-LABEL: @test_vpaddl_s32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[VPADDL1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.saddlp.v1i64.v2i32(<2 x i32> %a)
// CHECK: ret <1 x i64> [[VPADDL1_I]]
int64x1_t test_vpaddl_s32(int32x2_t a) {
  return vpaddl_s32(a);
}

// CHECK-LABEL: @test_vpaddl_u8(
// CHECK: [[VPADDL_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> %a)
// CHECK: ret <4 x i16> [[VPADDL_I]]
uint16x4_t test_vpaddl_u8(uint8x8_t a) {
  return vpaddl_u8(a);
}

// CHECK-LABEL: @test_vpaddl_u16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[VPADDL1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> %a)
// CHECK: ret <2 x i32> [[VPADDL1_I]]
uint32x2_t test_vpaddl_u16(uint16x4_t a) {
  return vpaddl_u16(a);
}

// CHECK-LABEL: @test_vpaddl_u32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[VPADDL1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.uaddlp.v1i64.v2i32(<2 x i32> %a)
// CHECK: ret <1 x i64> [[VPADDL1_I]]
uint64x1_t test_vpaddl_u32(uint32x2_t a) {
  return vpaddl_u32(a);
}

// CHECK-LABEL: @test_vpaddlq_s8(
// CHECK: [[VPADDL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8> %a)
// CHECK: ret <8 x i16> [[VPADDL_I]]
int16x8_t test_vpaddlq_s8(int8x16_t a) {
  return vpaddlq_s8(a);
}

// CHECK-LABEL: @test_vpaddlq_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VPADDL1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16> %a)
// CHECK: ret <4 x i32> [[VPADDL1_I]]
int32x4_t test_vpaddlq_s16(int16x8_t a) {
  return vpaddlq_s16(a);
}

// CHECK-LABEL: @test_vpaddlq_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VPADDL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> %a)
// CHECK: ret <2 x i64> [[VPADDL1_I]]
int64x2_t test_vpaddlq_s32(int32x4_t a) {
  return vpaddlq_s32(a);
}

// CHECK-LABEL: @test_vpaddlq_u8(
// CHECK: [[VPADDL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> %a)
// CHECK: ret <8 x i16> [[VPADDL_I]]
uint16x8_t test_vpaddlq_u8(uint8x16_t a) {
  return vpaddlq_u8(a);
}

// CHECK-LABEL: @test_vpaddlq_u16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VPADDL1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> %a)
// CHECK: ret <4 x i32> [[VPADDL1_I]]
uint32x4_t test_vpaddlq_u16(uint16x8_t a) {
  return vpaddlq_u16(a);
}

// CHECK-LABEL: @test_vpaddlq_u32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VPADDL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %a)
// CHECK: ret <2 x i64> [[VPADDL1_I]]
uint64x2_t test_vpaddlq_u32(uint32x4_t a) {
  return vpaddlq_u32(a);
}

// CHECK-LABEL: @test_vpadal_s8(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[VPADAL_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> %b)
// CHECK: [[TMP1:%.*]] = add <4 x i16> [[VPADAL_I]], %a
// CHECK: ret <4 x i16> [[TMP1]]
int16x4_t test_vpadal_s8(int16x4_t a, int8x8_t b) {
  return vpadal_s8(a, b);
}

// CHECK-LABEL: @test_vpadal_s16(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
// CHECK: [[VPADAL1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16> %b)
// CHECK: [[TMP2:%.*]] = add <2 x i32> [[VPADAL1_I]], %a
// CHECK: ret <2 x i32> [[TMP2]]
int32x2_t test_vpadal_s16(int32x2_t a, int16x4_t b) {
  return vpadal_s16(a, b);
}

// CHECK-LABEL: @test_vpadal_s32(
// CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8>
// CHECK: [[VPADAL1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.saddlp.v1i64.v2i32(<2 x i32> %b)
// CHECK: [[TMP2:%.*]] = add <1 x i64> [[VPADAL1_I]], %a
// CHECK: ret <1 x i64> [[TMP2]]
int64x1_t test_vpadal_s32(int64x1_t a, int32x2_t b) {
  return vpadal_s32(a, b);
}

// CHECK-LABEL: @test_vpadal_u8(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[VPADAL_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> %b)
// CHECK: [[TMP1:%.*]] = add <4 x i16> [[VPADAL_I]], %a
// CHECK: ret <4 x i16> [[TMP1]]
uint16x4_t test_vpadal_u8(uint16x4_t a, uint8x8_t b) {
  return vpadal_u8(a, b);
}

// CHECK-LABEL: @test_vpadal_u16(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
// CHECK: [[VPADAL1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> %b)
// CHECK: [[TMP2:%.*]] = add <2 x i32> [[VPADAL1_I]], %a
// CHECK: ret <2 x i32> [[TMP2]]
uint32x2_t test_vpadal_u16(uint32x2_t a, uint16x4_t b) {
  return vpadal_u16(a, b);
}

// CHECK-LABEL: @test_vpadal_u32(
// CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8>
// CHECK: [[VPADAL1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.uaddlp.v1i64.v2i32(<2 x i32> %b)
// CHECK: [[TMP2:%.*]] = add <1 x i64> [[VPADAL1_I]], %a
// CHECK: ret <1 x i64> [[TMP2]]
uint64x1_t test_vpadal_u32(uint64x1_t a, uint32x2_t b) {
  return vpadal_u32(a, b);
}

// CHECK-LABEL: @test_vpadalq_s8(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VPADAL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8> %b)
// CHECK: [[TMP1:%.*]] = add <8 x i16> [[VPADAL_I]], %a
// CHECK: ret <8 x i16> [[TMP1]]
int16x8_t test_vpadalq_s8(int16x8_t a, int8x16_t b) {
  return vpadalq_s8(a, b);
}

// CHECK-LABEL: @test_vpadalq_s16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8>
// CHECK: [[VPADAL1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16> %b)
// CHECK: [[TMP2:%.*]] = add <4 x i32> [[VPADAL1_I]], %a
// CHECK: ret <4 x i32> [[TMP2]]
int32x4_t test_vpadalq_s16(int32x4_t a, int16x8_t b) {
  return vpadalq_s16(a, b);
}

// CHECK-LABEL: @test_vpadalq_s32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8>
// CHECK: [[VPADAL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> %b)
// CHECK: [[TMP2:%.*]] = add <2 x i64> [[VPADAL1_I]], %a
// CHECK: ret <2 x i64> [[TMP2]]
int64x2_t test_vpadalq_s32(int64x2_t a, int32x4_t b) {
  return vpadalq_s32(a, b);
}

// CHECK-LABEL: @test_vpadalq_u8(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VPADAL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> %b)
// CHECK: [[TMP1:%.*]] = add <8 x i16> [[VPADAL_I]], %a
// CHECK: ret <8 x i16> [[TMP1]]
uint16x8_t test_vpadalq_u8(uint16x8_t a, uint8x16_t b) {
  return vpadalq_u8(a, b);
}

// CHECK-LABEL: @test_vpadalq_u16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8>
// CHECK: [[VPADAL1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> %b)
// CHECK: [[TMP2:%.*]] = add <4 x i32> [[VPADAL1_I]], %a
// CHECK: ret <4 x i32> [[TMP2]]
uint32x4_t test_vpadalq_u16(uint32x4_t a, uint16x8_t b) {
  return vpadalq_u16(a, b);
}

// CHECK-LABEL: @test_vpadalq_u32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8>
// CHECK: [[VPADAL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %b)
// CHECK: [[TMP2:%.*]] = add <2 x i64> [[VPADAL1_I]], %a
// CHECK: ret <2 x i64> [[TMP2]]
uint64x2_t test_vpadalq_u32(uint64x2_t a, uint32x4_t b) {
  return vpadalq_u32(a, b);
}

// CHECK-LABEL: @test_vqabs_s8(
// CHECK: [[VQABS_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqabs.v8i8(<8 x i8> %a)
// CHECK: ret <8 x i8> [[VQABS_V_I]]
int8x8_t test_vqabs_s8(int8x8_t a) {
  return vqabs_s8(a);
}

// CHECK-LABEL: @test_vqabsq_s8(
// CHECK: [[VQABSQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqabs.v16i8(<16 x i8> %a)
// CHECK: ret <16 x i8> [[VQABSQ_V_I]]
int8x16_t test_vqabsq_s8(int8x16_t a) {
  return vqabsq_s8(a);
}

// CHECK-LABEL: @test_vqabs_s16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[VQABS_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqabs.v4i16(<4 x i16> %a)
// CHECK: [[VQABS_V2_I:%.*]] = bitcast <4 x i16> [[VQABS_V1_I]] to <8 x i8>
// CHECK: ret <4 x i16> [[VQABS_V1_I]]
int16x4_t test_vqabs_s16(int16x4_t a) {
  return vqabs_s16(a);
}

// CHECK-LABEL: @test_vqabsq_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VQABSQ_V1_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqabs.v8i16(<8 x i16> %a)
// CHECK: [[VQABSQ_V2_I:%.*]] = bitcast <8 x i16> [[VQABSQ_V1_I]] to <16 x i8>
// CHECK: ret <8 x i16> [[VQABSQ_V1_I]]
int16x8_t test_vqabsq_s16(int16x8_t a) {
  return vqabsq_s16(a);
}

// CHECK-LABEL: @test_vqabs_s32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[VQABS_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqabs.v2i32(<2 x i32> %a)
// CHECK: [[VQABS_V2_I:%.*]] = bitcast <2 x i32> [[VQABS_V1_I]] to <8 x i8>
// CHECK: ret <2 x i32> [[VQABS_V1_I]]
int32x2_t test_vqabs_s32(int32x2_t a) {
  return vqabs_s32(a);
}

// CHECK-LABEL: @test_vqabsq_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VQABSQ_V1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqabs.v4i32(<4 x i32> %a)
// CHECK: [[VQABSQ_V2_I:%.*]] = bitcast <4 x i32> [[VQABSQ_V1_I]] to <16 x i8>
// CHECK: ret <4 x i32> [[VQABSQ_V1_I]]
int32x4_t test_vqabsq_s32(int32x4_t a) {
  return vqabsq_s32(a);
}

// CHECK-LABEL: @test_vqabsq_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[VQABSQ_V1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqabs.v2i64(<2 x i64> %a)
// CHECK: [[VQABSQ_V2_I:%.*]] = bitcast <2 x i64> [[VQABSQ_V1_I]] to <16 x i8>
// CHECK: ret <2 x i64> [[VQABSQ_V1_I]]
int64x2_t test_vqabsq_s64(int64x2_t a) {
  return vqabsq_s64(a);
}

// CHECK-LABEL: @test_vqneg_s8(
// CHECK: [[VQNEG_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqneg.v8i8(<8 x i8> %a)
// CHECK: ret <8 x i8> [[VQNEG_V_I]]
int8x8_t test_vqneg_s8(int8x8_t a) {
  return vqneg_s8(a);
}

// CHECK-LABEL: @test_vqnegq_s8(
// CHECK: [[VQNEGQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqneg.v16i8(<16 x i8> %a)
// CHECK: ret <16 x i8> [[VQNEGQ_V_I]]
int8x16_t test_vqnegq_s8(int8x16_t a) {
  return vqnegq_s8(a);
}

// CHECK-LABEL: @test_vqneg_s16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[VQNEG_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqneg.v4i16(<4 x i16> %a)
// CHECK: [[VQNEG_V2_I:%.*]] = bitcast <4 x i16> [[VQNEG_V1_I]] to <8 x i8>
// CHECK: ret <4 x i16> [[VQNEG_V1_I]]
int16x4_t test_vqneg_s16(int16x4_t a) {
  return vqneg_s16(a);
}

// CHECK-LABEL: @test_vqnegq_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VQNEGQ_V1_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqneg.v8i16(<8 x i16> %a)
// CHECK: [[VQNEGQ_V2_I:%.*]] = bitcast <8 x i16> [[VQNEGQ_V1_I]] to <16 x i8>
// CHECK: ret <8 x i16> [[VQNEGQ_V1_I]]
int16x8_t test_vqnegq_s16(int16x8_t a) {
  return vqnegq_s16(a);
}

// CHECK-LABEL: @test_vqneg_s32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[VQNEG_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqneg.v2i32(<2 x i32> %a)
// CHECK: [[VQNEG_V2_I:%.*]] = bitcast <2 x i32> [[VQNEG_V1_I]] to <8 x i8>
// CHECK: ret <2 x i32> [[VQNEG_V1_I]]
int32x2_t test_vqneg_s32(int32x2_t a) {
  return vqneg_s32(a);
}

// CHECK-LABEL: @test_vqnegq_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VQNEGQ_V1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqneg.v4i32(<4 x i32> %a)
// CHECK: [[VQNEGQ_V2_I:%.*]] = bitcast <4 x i32> [[VQNEGQ_V1_I]] to <16 x i8>
// CHECK: ret <4 x i32> [[VQNEGQ_V1_I]]
int32x4_t test_vqnegq_s32(int32x4_t a) {
  return vqnegq_s32(a);
}

// CHECK-LABEL: @test_vqnegq_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[VQNEGQ_V1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqneg.v2i64(<2 x i64> %a)
// CHECK: [[VQNEGQ_V2_I:%.*]] = bitcast <2 x i64> [[VQNEGQ_V1_I]] to <16 x i8>
// CHECK: ret <2 x i64> [[VQNEGQ_V1_I]]
int64x2_t test_vqnegq_s64(int64x2_t a) {
  return vqnegq_s64(a);
}

// CHECK-LABEL: @test_vneg_s8(
// CHECK: [[SUB_I:%.*]] = sub <8 x i8> zeroinitializer, %a
// CHECK: ret <8 x i8> [[SUB_I]]
int8x8_t test_vneg_s8(int8x8_t a) {
  return vneg_s8(a);
}

// CHECK-LABEL: @test_vnegq_s8(
// CHECK: [[SUB_I:%.*]] = sub <16 x i8> zeroinitializer, %a
// CHECK: ret <16 x i8> [[SUB_I]]
int8x16_t test_vnegq_s8(int8x16_t a) {
  return vnegq_s8(a);
}

// CHECK-LABEL: @test_vneg_s16(
// CHECK: [[SUB_I:%.*]] = sub <4 x i16> zeroinitializer, %a
// CHECK: ret <4 x i16> [[SUB_I]]
int16x4_t test_vneg_s16(int16x4_t a) {
  return vneg_s16(a);
}

// CHECK-LABEL: @test_vnegq_s16(
// CHECK: [[SUB_I:%.*]] = sub <8 x i16> zeroinitializer, %a
// CHECK: ret <8 x i16> [[SUB_I]]
int16x8_t test_vnegq_s16(int16x8_t a) {
  return vnegq_s16(a);
}

// CHECK-LABEL: @test_vneg_s32(
// CHECK: [[SUB_I:%.*]] = sub <2 x i32> zeroinitializer, %a
// CHECK: ret <2 x i32> [[SUB_I]]
int32x2_t test_vneg_s32(int32x2_t a) {
  return vneg_s32(a);
}

// CHECK-LABEL: @test_vnegq_s32(
// CHECK: [[SUB_I:%.*]] = sub <4 x i32> zeroinitializer, %a
// CHECK: ret <4 x i32> [[SUB_I]]
int32x4_t test_vnegq_s32(int32x4_t a) {
  return vnegq_s32(a);
}

// CHECK-LABEL: @test_vnegq_s64(
// CHECK: [[SUB_I:%.*]] = sub <2 x i64> zeroinitializer, %a
// CHECK: ret <2 x i64> [[SUB_I]]
int64x2_t test_vnegq_s64(int64x2_t a) {
  return vnegq_s64(a);
}

// CHECK-LABEL: @test_vneg_f32(
// CHECK: [[SUB_I:%.*]] = fneg <2 x float> %a
// CHECK: ret <2 x float> [[SUB_I]]
float32x2_t test_vneg_f32(float32x2_t a) {
  return vneg_f32(a);
}

// CHECK-LABEL: @test_vnegq_f32(
// CHECK: [[SUB_I:%.*]] = fneg <4 x float> %a
// CHECK: ret <4 x float> [[SUB_I]]
float32x4_t test_vnegq_f32(float32x4_t a) {
  return vnegq_f32(a);
}

// CHECK-LABEL: @test_vnegq_f64(
// CHECK: [[SUB_I:%.*]] = fneg <2 x double> %a
// CHECK: ret <2 x double> [[SUB_I]]
float64x2_t test_vnegq_f64(float64x2_t a) {
  return vnegq_f64(a);
}

// CHECK-LABEL: @test_vabs_s8(
// CHECK: [[VABS_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.abs.v8i8(<8 x i8> %a)
// CHECK: ret <8 x i8> [[VABS_I]]
int8x8_t test_vabs_s8(int8x8_t a) {
  return vabs_s8(a);
}

// CHECK-LABEL: @test_vabsq_s8(
// CHECK: [[VABS_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.abs.v16i8(<16 x i8> %a)
// CHECK: ret <16 x i8> [[VABS_I]]
int8x16_t test_vabsq_s8(int8x16_t a) {
  return vabsq_s8(a);
}

// CHECK-LABEL: @test_vabs_s16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[VABS1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.abs.v4i16(<4 x i16> %a)
// CHECK: ret <4 x i16> [[VABS1_I]]
int16x4_t test_vabs_s16(int16x4_t a) {
  return vabs_s16(a);
}

// CHECK-LABEL: @test_vabsq_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VABS1_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.abs.v8i16(<8 x i16> %a)
// CHECK: ret <8 x i16> [[VABS1_I]]
int16x8_t test_vabsq_s16(int16x8_t a) {
  return vabsq_s16(a);
}

// CHECK-LABEL: @test_vabs_s32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[VABS1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.abs.v2i32(<2 x i32> %a)
// CHECK: ret <2 x i32> [[VABS1_I]]
int32x2_t test_vabs_s32(int32x2_t a) {
  return vabs_s32(a);
}

// CHECK-LABEL: @test_vabsq_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VABS1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.abs.v4i32(<4 x i32> %a)
// CHECK: ret <4 x i32> [[VABS1_I]]
int32x4_t test_vabsq_s32(int32x4_t a) {
  return vabsq_s32(a);
}

// CHECK-LABEL: @test_vabsq_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[VABS1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.abs.v2i64(<2 x i64> %a)
// CHECK: ret <2 x i64> [[VABS1_I]]
int64x2_t test_vabsq_s64(int64x2_t a) {
  return vabsq_s64(a);
}

// CHECK-LABEL: @test_vabs_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[VABS1_I:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> %a)
// CHECK: ret <2 x float> [[VABS1_I]]
float32x2_t test_vabs_f32(float32x2_t a) {
  return vabs_f32(a);
}

// CHECK-LABEL: @test_vabsq_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[VABS1_I:%.*]] = call <4 x float> @llvm.fabs.v4f32(<4 x float> %a)
// CHECK: ret <4 x float> [[VABS1_I]]
float32x4_t test_vabsq_f32(float32x4_t a) {
  return vabsq_f32(a);
}

// CHECK-LABEL: @test_vabsq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VABS1_I:%.*]] = call <2 x double> @llvm.fabs.v2f64(<2 x double> %a)
// CHECK: ret <2 x double> [[VABS1_I]]
float64x2_t test_vabsq_f64(float64x2_t a) {
  return vabsq_f64(a);
}

// CHECK-LABEL: @test_vuqadd_s8(
// CHECK: [[VUQADD_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.suqadd.v8i8(<8 x i8> %a, <8 x i8> %b)
// CHECK: ret <8 x i8> [[VUQADD_I]]
int8x8_t test_vuqadd_s8(int8x8_t a, int8x8_t b) {
  return vuqadd_s8(a, b);
}

// CHECK-LABEL: @test_vuqaddq_s8(
// CHECK: [[VUQADD_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.suqadd.v16i8(<16 x i8> %a, <16 x i8> %b)
// CHECK: ret <16 x i8> [[VUQADD_I]]
int8x16_t test_vuqaddq_s8(int8x16_t a, int8x16_t b) {
  return vuqaddq_s8(a, b);
}

// CHECK-LABEL: @test_vuqadd_s16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
// CHECK: [[VUQADD2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.suqadd.v4i16(<4 x i16> %a, <4 x i16> %b)
// CHECK: ret <4 x i16> [[VUQADD2_I]]
int16x4_t test_vuqadd_s16(int16x4_t a, int16x4_t b) {
  return vuqadd_s16(a, b);
}

// CHECK-LABEL: @test_vuqaddq_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8>
// CHECK: [[VUQADD2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.suqadd.v8i16(<8 x i16> %a, <8 x i16> %b)
// CHECK: ret <8 x i16> [[VUQADD2_I]]
int16x8_t test_vuqaddq_s16(int16x8_t a, int16x8_t b) {
  return vuqaddq_s16(a, b);
}

// CHECK-LABEL: @test_vuqadd_s32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8>
// CHECK: [[VUQADD2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.suqadd.v2i32(<2 x i32> %a, <2 x i32> %b)
// CHECK: ret <2 x i32> [[VUQADD2_I]]
int32x2_t test_vuqadd_s32(int32x2_t a, int32x2_t b) {
  return vuqadd_s32(a, b);
}

// CHECK-LABEL: @test_vuqaddq_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8>
// CHECK: [[VUQADD2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.suqadd.v4i32(<4 x i32> %a, <4 x i32> %b)
// CHECK: ret <4 x i32> [[VUQADD2_I]]
int32x4_t test_vuqaddq_s32(int32x4_t a, int32x4_t b) {
  return vuqaddq_s32(a, b);
}

// CHECK-LABEL: @test_vuqaddq_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8>
// CHECK: [[VUQADD2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.suqadd.v2i64(<2 x i64> %a, <2 x i64> %b)
// CHECK: ret <2 x i64> [[VUQADD2_I]]
int64x2_t test_vuqaddq_s64(int64x2_t a, int64x2_t b) {
  return vuqaddq_s64(a, b);
}

// CHECK-LABEL: @test_vcls_s8(
// CHECK: [[VCLS_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.cls.v8i8(<8 x i8> %a)
// CHECK: ret <8 x i8> [[VCLS_V_I]]
int8x8_t test_vcls_s8(int8x8_t a) {
  return vcls_s8(a);
}

// CHECK-LABEL: @test_vcls_u8(
// CHECK: [[VCLS_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.cls.v8i8(<8 x i8> %a)
// CHECK: ret <8 x i8> [[VCLS_V_I]]
int8x8_t test_vcls_u8(uint8x8_t a) {
  return vcls_u8(a);
}

// CHECK-LABEL: @test_vclsq_s8(
// CHECK: [[VCLSQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.cls.v16i8(<16 x i8> %a)
// CHECK: ret <16 x i8> [[VCLSQ_V_I]]
int8x16_t test_vclsq_s8(int8x16_t a) {
  return vclsq_s8(a);
}

// CHECK-LABEL: @test_vclsq_u8(
// CHECK: [[VCLSQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.cls.v16i8(<16 x i8> %a)
// CHECK: ret <16 x i8> [[VCLSQ_V_I]]
int8x16_t test_vclsq_u8(uint8x16_t a) {
  return vclsq_u8(a);
}

// CHECK-LABEL: @test_vcls_s16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[VCLS_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.cls.v4i16(<4 x i16> %a)
// CHECK: [[VCLS_V2_I:%.*]] = bitcast <4 x i16> [[VCLS_V1_I]] to <8 x i8>
// CHECK: ret <4 x i16> [[VCLS_V1_I]]
int16x4_t test_vcls_s16(int16x4_t a) {
  return vcls_s16(a);
}

// CHECK-LABEL: @test_vcls_u16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[VCLS_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.cls.v4i16(<4 x i16> %a)
// CHECK: [[VCLS_V2_I:%.*]] = bitcast <4 x i16> [[VCLS_V1_I]] to <8 x i8>
// CHECK: ret <4 x i16> [[VCLS_V1_I]]
int16x4_t test_vcls_u16(uint16x4_t a) {
  return vcls_u16(a);
}

// CHECK-LABEL: @test_vclsq_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VCLSQ_V1_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.cls.v8i16(<8 x i16> %a)
// CHECK: [[VCLSQ_V2_I:%.*]] = bitcast <8 x i16> [[VCLSQ_V1_I]] to <16 x i8>
// CHECK: ret <8 x i16> [[VCLSQ_V1_I]]
int16x8_t test_vclsq_s16(int16x8_t a) {
  return vclsq_s16(a);
}

// CHECK-LABEL: @test_vclsq_u16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VCLSQ_V1_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.cls.v8i16(<8 x i16> %a)
// CHECK: [[VCLSQ_V2_I:%.*]] = bitcast <8 x i16> [[VCLSQ_V1_I]] to <16 x i8>
// CHECK: ret <8 x i16> [[VCLSQ_V1_I]]
int16x8_t test_vclsq_u16(uint16x8_t a) {
  return vclsq_u16(a);
}

// CHECK-LABEL: @test_vcls_s32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[VCLS_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.cls.v2i32(<2 x i32> %a)
// CHECK: [[VCLS_V2_I:%.*]] = bitcast <2 x i32> [[VCLS_V1_I]] to <8 x i8>
// CHECK: ret <2 x i32> [[VCLS_V1_I]]
int32x2_t test_vcls_s32(int32x2_t a) {
  return vcls_s32(a);
}

// CHECK-LABEL: @test_vcls_u32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[VCLS_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.cls.v2i32(<2 x i32> %a)
// CHECK: [[VCLS_V2_I:%.*]] = bitcast <2 x i32> [[VCLS_V1_I]] to <8 x i8>
// CHECK: ret <2 x i32> [[VCLS_V1_I]]
int32x2_t test_vcls_u32(uint32x2_t a) {
  return vcls_u32(a);
}

// CHECK-LABEL: @test_vclsq_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VCLSQ_V1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.cls.v4i32(<4 x i32> %a)
// CHECK: [[VCLSQ_V2_I:%.*]] = bitcast <4 x i32> [[VCLSQ_V1_I]] to <16 x i8>
// CHECK: ret <4 x i32> [[VCLSQ_V1_I]]
int32x4_t test_vclsq_s32(int32x4_t a) {
  return vclsq_s32(a);
}

// CHECK-LABEL: @test_vclsq_u32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VCLSQ_V1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.cls.v4i32(<4 x i32> %a)
// CHECK: [[VCLSQ_V2_I:%.*]] = bitcast <4 x i32> [[VCLSQ_V1_I]] to <16 x i8>
// CHECK: ret <4 x i32> [[VCLSQ_V1_I]]
int32x4_t test_vclsq_u32(uint32x4_t a) {
  return vclsq_u32(a);
}

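// Illustrative sketch (helper name is ours): what one lane of vcls_s8
// computes. CLS counts how many bits below the sign bit equal it, so
// cls(0) == 7 and cls(0x40) == 0 for an 8-bit lane.
static int8_t cls8_ref(int8_t x) {
  int sign = (x >> 7) & 1; // the lane's sign bit
  int8_t n = 0;
  for (int i = 6; i >= 0 && ((x >> i) & 1) == sign; --i)
    ++n;                   // count redundant copies of the sign bit
  return n;
}
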
// CHECK-LABEL: @test_vclz_s8(
// CHECK: [[VCLZ_V_I:%.*]] = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %a, i1 false)
// CHECK: ret <8 x i8> [[VCLZ_V_I]]
int8x8_t test_vclz_s8(int8x8_t a) {
  return vclz_s8(a);
}

// CHECK-LABEL: @test_vclzq_s8(
// CHECK: [[VCLZQ_V_I:%.*]] = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false)
// CHECK: ret <16 x i8> [[VCLZQ_V_I]]
int8x16_t test_vclzq_s8(int8x16_t a) {
  return vclzq_s8(a);
}

// CHECK-LABEL: @test_vclz_s16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[VCLZ_V1_I:%.*]] = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %a, i1 false)
// CHECK: [[VCLZ_V2_I:%.*]] = bitcast <4 x i16> [[VCLZ_V1_I]] to <8 x i8>
// CHECK: ret <4 x i16> [[VCLZ_V1_I]]
int16x4_t test_vclz_s16(int16x4_t a) {
  return vclz_s16(a);
}

// CHECK-LABEL: @test_vclzq_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VCLZQ_V1_I:%.*]] = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 false)
// CHECK: [[VCLZQ_V2_I:%.*]] = bitcast <8 x i16> [[VCLZQ_V1_I]] to <16 x i8>
// CHECK: ret <8 x i16> [[VCLZQ_V1_I]]
int16x8_t test_vclzq_s16(int16x8_t a) {
  return vclzq_s16(a);
}

// CHECK-LABEL: @test_vclz_s32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[VCLZ_V1_I:%.*]] = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false)
// CHECK: [[VCLZ_V2_I:%.*]] = bitcast <2 x i32> [[VCLZ_V1_I]] to <8 x i8>
// CHECK: ret <2 x i32> [[VCLZ_V1_I]]
int32x2_t test_vclz_s32(int32x2_t a) {
  return vclz_s32(a);
}

// CHECK-LABEL: @test_vclzq_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VCLZQ_V1_I:%.*]] = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false)
// CHECK: [[VCLZQ_V2_I:%.*]] = bitcast <4 x i32> [[VCLZQ_V1_I]] to <16 x i8>
// CHECK: ret <4 x i32> [[VCLZQ_V1_I]]
int32x4_t test_vclzq_s32(int32x4_t a) {
  return vclzq_s32(a);
}

// CHECK-LABEL: @test_vclz_u8(
// CHECK: [[VCLZ_V_I:%.*]] = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %a, i1 false)
// CHECK: ret <8 x i8> [[VCLZ_V_I]]
uint8x8_t test_vclz_u8(uint8x8_t a) {
  return vclz_u8(a);
}

// CHECK-LABEL: @test_vclzq_u8(
// CHECK: [[VCLZQ_V_I:%.*]] = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false)
// CHECK: ret <16 x i8> [[VCLZQ_V_I]]
uint8x16_t test_vclzq_u8(uint8x16_t a) {
  return vclzq_u8(a);
}

// CHECK-LABEL: @test_vclz_u16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[VCLZ_V1_I:%.*]] = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %a, i1 false)
// CHECK: [[VCLZ_V2_I:%.*]] = bitcast <4 x i16> [[VCLZ_V1_I]] to <8 x i8>
// CHECK: ret <4 x i16> [[VCLZ_V1_I]]
uint16x4_t test_vclz_u16(uint16x4_t a) {
  return vclz_u16(a);
}

// CHECK-LABEL: @test_vclzq_u16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VCLZQ_V1_I:%.*]] = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 false)
// CHECK: [[VCLZQ_V2_I:%.*]] = bitcast <8 x i16> [[VCLZQ_V1_I]] to <16 x i8>
// CHECK: ret <8 x i16> [[VCLZQ_V1_I]]
uint16x8_t test_vclzq_u16(uint16x8_t a) {
  return vclzq_u16(a);
}

// CHECK-LABEL: @test_vclz_u32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[VCLZ_V1_I:%.*]] = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false)
// CHECK: [[VCLZ_V2_I:%.*]] = bitcast <2 x i32> [[VCLZ_V1_I]] to <8 x i8>
// CHECK: ret <2 x i32> [[VCLZ_V1_I]]
uint32x2_t test_vclz_u32(uint32x2_t a) {
  return vclz_u32(a);
}

// CHECK-LABEL: @test_vclzq_u32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VCLZQ_V1_I:%.*]] = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false)
// CHECK: [[VCLZQ_V2_I:%.*]] = bitcast <4 x i32> [[VCLZQ_V1_I]] to <16 x i8>
// CHECK: ret <4 x i32> [[VCLZQ_V1_I]]
uint32x4_t test_vclzq_u32(uint32x4_t a) {
  return vclzq_u32(a);
}

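// Illustrative sketch: the checks above call llvm.ctlz with the
// "is_zero_poison" flag set to false, so an all-zero lane is well defined
// and yields the full lane width.
static uint32x2_t clz_of_zero_example(void) {
  uint32x2_t v = vdup_n_u32(0); // both lanes zero
  return vclz_u32(v);           // each lane becomes 32
}
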
// CHECK-LABEL: @test_vcnt_s8(
// CHECK: [[VCNT_V_I:%.*]] = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %a)
// CHECK: ret <8 x i8> [[VCNT_V_I]]
int8x8_t test_vcnt_s8(int8x8_t a) {
  return vcnt_s8(a);
}

// CHECK-LABEL: @test_vcntq_s8(
// CHECK: [[VCNTQ_V_I:%.*]] = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %a)
// CHECK: ret <16 x i8> [[VCNTQ_V_I]]
int8x16_t test_vcntq_s8(int8x16_t a) {
  return vcntq_s8(a);
}

// CHECK-LABEL: @test_vcnt_u8(
// CHECK: [[VCNT_V_I:%.*]] = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %a)
// CHECK: ret <8 x i8> [[VCNT_V_I]]
uint8x8_t test_vcnt_u8(uint8x8_t a) {
  return vcnt_u8(a);
}

// CHECK-LABEL: @test_vcntq_u8(
// CHECK: [[VCNTQ_V_I:%.*]] = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %a)
// CHECK: ret <16 x i8> [[VCNTQ_V_I]]
uint8x16_t test_vcntq_u8(uint8x16_t a) {
  return vcntq_u8(a);
}

// CHECK-LABEL: @test_vcnt_p8(
// CHECK: [[VCNT_V_I:%.*]] = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %a)
// CHECK: ret <8 x i8> [[VCNT_V_I]]
poly8x8_t test_vcnt_p8(poly8x8_t a) {
  return vcnt_p8(a);
}

// CHECK-LABEL: @test_vcntq_p8(
// CHECK: [[VCNTQ_V_I:%.*]] = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %a)
// CHECK: ret <16 x i8> [[VCNTQ_V_I]]
poly8x16_t test_vcntq_p8(poly8x16_t a) {
  return vcntq_p8(a);
}

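// Illustrative sketch (a common idiom, not exercised by the checks): the
// byte-wise vcnt_u8 popcount is widened into a single 64-bit total with
// pairwise widening adds.
static uint64x1_t popcount64_example(uint8x8_t bytes) {
  uint8x8_t per_byte = vcnt_u8(bytes);      // llvm.ctpop on each byte
  uint16x4_t pairs   = vpaddl_u8(per_byte); // sum adjacent byte counts
  uint32x2_t quads   = vpaddl_u16(pairs);
  return vpaddl_u32(quads);                 // one 64-bit total
}
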
// CHECK-LABEL: @test_vmvn_s8(
// CHECK: [[NEG_I:%.*]] = xor <8 x i8> %a, splat (i8 -1)
// CHECK: ret <8 x i8> [[NEG_I]]
int8x8_t test_vmvn_s8(int8x8_t a) {
  return vmvn_s8(a);
}

// CHECK-LABEL: @test_vmvnq_s8(
// CHECK: [[NEG_I:%.*]] = xor <16 x i8> %a, splat (i8 -1)
// CHECK: ret <16 x i8> [[NEG_I]]
int8x16_t test_vmvnq_s8(int8x16_t a) {
  return vmvnq_s8(a);
}

// CHECK-LABEL: @test_vmvn_s16(
// CHECK: [[NEG_I:%.*]] = xor <4 x i16> %a, splat (i16 -1)
// CHECK: ret <4 x i16> [[NEG_I]]
int16x4_t test_vmvn_s16(int16x4_t a) {
  return vmvn_s16(a);
}

// CHECK-LABEL: @test_vmvnq_s16(
// CHECK: [[NEG_I:%.*]] = xor <8 x i16> %a, splat (i16 -1)
// CHECK: ret <8 x i16> [[NEG_I]]
int16x8_t test_vmvnq_s16(int16x8_t a) {
  return vmvnq_s16(a);
}

// CHECK-LABEL: @test_vmvn_s32(
// CHECK: [[NEG_I:%.*]] = xor <2 x i32> %a, splat (i32 -1)
// CHECK: ret <2 x i32> [[NEG_I]]
int32x2_t test_vmvn_s32(int32x2_t a) {
  return vmvn_s32(a);
}

// CHECK-LABEL: @test_vmvnq_s32(
// CHECK: [[NEG_I:%.*]] = xor <4 x i32> %a, splat (i32 -1)
// CHECK: ret <4 x i32> [[NEG_I]]
int32x4_t test_vmvnq_s32(int32x4_t a) {
  return vmvnq_s32(a);
}

// CHECK-LABEL: @test_vmvn_u8(
// CHECK: [[NEG_I:%.*]] = xor <8 x i8> %a, splat (i8 -1)
// CHECK: ret <8 x i8> [[NEG_I]]
uint8x8_t test_vmvn_u8(uint8x8_t a) {
  return vmvn_u8(a);
}

// CHECK-LABEL: @test_vmvnq_u8(
// CHECK: [[NEG_I:%.*]] = xor <16 x i8> %a, splat (i8 -1)
// CHECK: ret <16 x i8> [[NEG_I]]
uint8x16_t test_vmvnq_u8(uint8x16_t a) {
  return vmvnq_u8(a);
}

// CHECK-LABEL: @test_vmvn_u16(
// CHECK: [[NEG_I:%.*]] = xor <4 x i16> %a, splat (i16 -1)
// CHECK: ret <4 x i16> [[NEG_I]]
uint16x4_t test_vmvn_u16(uint16x4_t a) {
  return vmvn_u16(a);
}

// CHECK-LABEL: @test_vmvnq_u16(
// CHECK: [[NEG_I:%.*]] = xor <8 x i16> %a, splat (i16 -1)
// CHECK: ret <8 x i16> [[NEG_I]]
uint16x8_t test_vmvnq_u16(uint16x8_t a) {
  return vmvnq_u16(a);
}

// CHECK-LABEL: @test_vmvn_u32(
// CHECK: [[NEG_I:%.*]] = xor <2 x i32> %a, splat (i32 -1)
// CHECK: ret <2 x i32> [[NEG_I]]
uint32x2_t test_vmvn_u32(uint32x2_t a) {
  return vmvn_u32(a);
}

// CHECK-LABEL: @test_vmvnq_u32(
// CHECK: [[NEG_I:%.*]] = xor <4 x i32> %a, splat (i32 -1)
// CHECK: ret <4 x i32> [[NEG_I]]
uint32x4_t test_vmvnq_u32(uint32x4_t a) {
  return vmvnq_u32(a);
}

// CHECK-LABEL: @test_vmvn_p8(
// CHECK: [[NEG_I:%.*]] = xor <8 x i8> %a, splat (i8 -1)
// CHECK: ret <8 x i8> [[NEG_I]]
poly8x8_t test_vmvn_p8(poly8x8_t a) {
  return vmvn_p8(a);
}

// CHECK-LABEL: @test_vmvnq_p8(
// CHECK: [[NEG_I:%.*]] = xor <16 x i8> %a, splat (i8 -1)
// CHECK: ret <16 x i8> [[NEG_I]]
poly8x16_t test_vmvnq_p8(poly8x16_t a) {
  return vmvnq_p8(a);
}

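// Illustrative sketch: since vmvn* is emitted as xor with an all-ones
// splat, it is equivalent to an explicit veor against ~0.
static uint32x4_t mvn_via_eor(uint32x4_t v) {
  return veorq_u32(v, vdupq_n_u32(~0u)); // same IR shape as vmvnq_u32(v)
}
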
// CHECK-LABEL: @test_vrbit_s8(
// CHECK: [[VRBIT_I:%.*]] = call <8 x i8> @llvm.bitreverse.v8i8(<8 x i8> %a)
// CHECK: ret <8 x i8> [[VRBIT_I]]
int8x8_t test_vrbit_s8(int8x8_t a) {
  return vrbit_s8(a);
}

// CHECK-LABEL: @test_vrbitq_s8(
// CHECK: [[VRBIT_I:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %a)
// CHECK: ret <16 x i8> [[VRBIT_I]]
int8x16_t test_vrbitq_s8(int8x16_t a) {
  return vrbitq_s8(a);
}

// CHECK-LABEL: @test_vrbit_u8(
// CHECK: [[VRBIT_I:%.*]] = call <8 x i8> @llvm.bitreverse.v8i8(<8 x i8> %a)
// CHECK: ret <8 x i8> [[VRBIT_I]]
uint8x8_t test_vrbit_u8(uint8x8_t a) {
  return vrbit_u8(a);
}

// CHECK-LABEL: @test_vrbitq_u8(
// CHECK: [[VRBIT_I:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %a)
// CHECK: ret <16 x i8> [[VRBIT_I]]
uint8x16_t test_vrbitq_u8(uint8x16_t a) {
  return vrbitq_u8(a);
}

// CHECK-LABEL: @test_vrbit_p8(
// CHECK: [[VRBIT_I:%.*]] = call <8 x i8> @llvm.bitreverse.v8i8(<8 x i8> %a)
// CHECK: ret <8 x i8> [[VRBIT_I]]
poly8x8_t test_vrbit_p8(poly8x8_t a) {
  return vrbit_p8(a);
}

// CHECK-LABEL: @test_vrbitq_p8(
// CHECK: [[VRBIT_I:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %a)
// CHECK: ret <16 x i8> [[VRBIT_I]]
poly8x16_t test_vrbitq_p8(poly8x16_t a) {
  return vrbitq_p8(a);
}

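// Illustrative sketch (helper name is ours): a scalar model of the
// per-byte llvm.bitreverse that vrbit* performs; bit 0 swaps with bit 7,
// bit 1 with bit 6, and so on.
static uint8_t rbit8_ref(uint8_t x) {
  uint8_t r = 0;
  for (int i = 0; i < 8; ++i)
    r = (uint8_t)((r << 1) | ((x >> i) & 1)); // shift bits out low-first
  return r;
}
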
// CHECK-LABEL: @test_vmovn_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VMOVN_I:%.*]] = trunc <8 x i16> %a to <8 x i8>
// CHECK: ret <8 x i8> [[VMOVN_I]]
int8x8_t test_vmovn_s16(int16x8_t a) {
  return vmovn_s16(a);
}

// CHECK-LABEL: @test_vmovn_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VMOVN_I:%.*]] = trunc <4 x i32> %a to <4 x i16>
// CHECK: ret <4 x i16> [[VMOVN_I]]
int16x4_t test_vmovn_s32(int32x4_t a) {
  return vmovn_s32(a);
}

// CHECK-LABEL: @test_vmovn_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[VMOVN_I:%.*]] = trunc <2 x i64> %a to <2 x i32>
// CHECK: ret <2 x i32> [[VMOVN_I]]
int32x2_t test_vmovn_s64(int64x2_t a) {
  return vmovn_s64(a);
}

// CHECK-LABEL: @test_vmovn_u16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VMOVN_I:%.*]] = trunc <8 x i16> %a to <8 x i8>
// CHECK: ret <8 x i8> [[VMOVN_I]]
uint8x8_t test_vmovn_u16(uint16x8_t a) {
  return vmovn_u16(a);
}

// CHECK-LABEL: @test_vmovn_u32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VMOVN_I:%.*]] = trunc <4 x i32> %a to <4 x i16>
// CHECK: ret <4 x i16> [[VMOVN_I]]
uint16x4_t test_vmovn_u32(uint32x4_t a) {
  return vmovn_u32(a);
}

// CHECK-LABEL: @test_vmovn_u64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[VMOVN_I:%.*]] = trunc <2 x i64> %a to <2 x i32>
// CHECK: ret <2 x i32> [[VMOVN_I]]
uint32x2_t test_vmovn_u64(uint64x2_t a) {
  return vmovn_u64(a);
}

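// Illustrative sketch: vmovn* is a plain lane-wise trunc, so out-of-range
// values wrap rather than saturate; compare the vqmovn* tests further on.
static uint8x8_t narrow_trunc_example(void) {
  uint16x8_t v = vdupq_n_u16(0x0180);
  return vmovn_u16(v); // every lane becomes 0x80; the high byte is dropped
}
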
// CHECK-LABEL: @test_vmovn_high_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8>
// CHECK: [[VMOVN_I_I:%.*]] = trunc <8 x i16> %b to <8 x i8>
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> [[VMOVN_I_I]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: ret <16 x i8> [[SHUFFLE_I_I]]
int8x16_t test_vmovn_high_s16(int8x8_t a, int16x8_t b) {
  return vmovn_high_s16(a, b);
}

// CHECK-LABEL: @test_vmovn_high_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8>
// CHECK: [[VMOVN_I_I:%.*]] = trunc <4 x i32> %b to <4 x i16>
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> [[VMOVN_I_I]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: ret <8 x i16> [[SHUFFLE_I_I]]
int16x8_t test_vmovn_high_s32(int16x4_t a, int32x4_t b) {
  return vmovn_high_s32(a, b);
}

// CHECK-LABEL: @test_vmovn_high_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %b to <16 x i8>
// CHECK: [[VMOVN_I_I:%.*]] = trunc <2 x i64> %b to <2 x i32>
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> [[VMOVN_I_I]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: ret <4 x i32> [[SHUFFLE_I_I]]
int32x4_t test_vmovn_high_s64(int32x2_t a, int64x2_t b) {
  return vmovn_high_s64(a, b);
}

// CHECK-LABEL: @test_vmovn_high_u16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8>
// CHECK: [[VMOVN_I_I:%.*]] = trunc <8 x i16> %b to <8 x i8>
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> [[VMOVN_I_I]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: ret <16 x i8> [[SHUFFLE_I_I]]
int8x16_t test_vmovn_high_u16(int8x8_t a, int16x8_t b) {
  return vmovn_high_u16(a, b);
}

// CHECK-LABEL: @test_vmovn_high_u32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8>
// CHECK: [[VMOVN_I_I:%.*]] = trunc <4 x i32> %b to <4 x i16>
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> [[VMOVN_I_I]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: ret <8 x i16> [[SHUFFLE_I_I]]
int16x8_t test_vmovn_high_u32(int16x4_t a, int32x4_t b) {
  return vmovn_high_u32(a, b);
}

// CHECK-LABEL: @test_vmovn_high_u64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %b to <16 x i8>
// CHECK: [[VMOVN_I_I:%.*]] = trunc <2 x i64> %b to <2 x i32>
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> [[VMOVN_I_I]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: ret <4 x i32> [[SHUFFLE_I_I]]
int32x4_t test_vmovn_high_u64(int32x2_t a, int64x2_t b) {
  return vmovn_high_u64(a, b);
}

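// Illustrative sketch (written against the unsigned ACLE prototypes, unlike
// the lax-typed tests above): the _high narrows concatenate, keeping the
// existing narrow vector in the low half and putting trunc(wide) in the
// high half, which is exactly the 0..15 shufflevector in the checks.
static uint8x16_t narrow_high_example(uint8x8_t low, uint16x8_t wide) {
  return vmovn_high_u16(low, wide);
}
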
// CHECK-LABEL: @test_vqmovun_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VQMOVUN_V1_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqxtun.v8i8(<8 x i16> %a)
// CHECK: ret <8 x i8> [[VQMOVUN_V1_I]]
int8x8_t test_vqmovun_s16(int16x8_t a) {
  return vqmovun_s16(a);
}

// CHECK-LABEL: @test_vqmovun_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VQMOVUN_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> %a)
// CHECK: [[VQMOVUN_V2_I:%.*]] = bitcast <4 x i16> [[VQMOVUN_V1_I]] to <8 x i8>
// CHECK: ret <4 x i16> [[VQMOVUN_V1_I]]
int16x4_t test_vqmovun_s32(int32x4_t a) {
  return vqmovun_s32(a);
}

// CHECK-LABEL: @test_vqmovun_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[VQMOVUN_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqxtun.v2i32(<2 x i64> %a)
// CHECK: [[VQMOVUN_V2_I:%.*]] = bitcast <2 x i32> [[VQMOVUN_V1_I]] to <8 x i8>
// CHECK: ret <2 x i32> [[VQMOVUN_V1_I]]
int32x2_t test_vqmovun_s64(int64x2_t a) {
  return vqmovun_s64(a);
}

// CHECK-LABEL: @test_vqmovun_high_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8>
// CHECK: [[VQMOVUN_V1_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqxtun.v8i8(<8 x i16> %b)
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> [[VQMOVUN_V1_I_I]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: ret <16 x i8> [[SHUFFLE_I_I]]
uint8x16_t test_vqmovun_high_s16(uint8x8_t a, int16x8_t b) {
  return vqmovun_high_s16(a, b);
}

// CHECK-LABEL: @test_vqmovun_high_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8>
// CHECK: [[VQMOVUN_V1_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> %b)
// CHECK: [[VQMOVUN_V2_I_I:%.*]] = bitcast <4 x i16> [[VQMOVUN_V1_I_I]] to <8 x i8>
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> [[VQMOVUN_V1_I_I]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: ret <8 x i16> [[SHUFFLE_I_I]]
uint16x8_t test_vqmovun_high_s32(uint16x4_t a, int32x4_t b) {
  return vqmovun_high_s32(a, b);
}

// CHECK-LABEL: @test_vqmovun_high_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %b to <16 x i8>
// CHECK: [[VQMOVUN_V1_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqxtun.v2i32(<2 x i64> %b)
// CHECK: [[VQMOVUN_V2_I_I:%.*]] = bitcast <2 x i32> [[VQMOVUN_V1_I_I]] to <8 x i8>
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> [[VQMOVUN_V1_I_I]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: ret <4 x i32> [[SHUFFLE_I_I]]
uint32x4_t test_vqmovun_high_s64(uint32x2_t a, int64x2_t b) {
  return vqmovun_high_s64(a, b);
}

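// Illustrative sketch (helper name is ours): what sqxtun does per lane when
// narrowing i32 to u16 -- negative inputs clamp to 0, large inputs clamp to
// the unsigned maximum, everything else truncates losslessly.
static uint16_t sqxtun16_ref(int32_t x) {
  if (x < 0)      return 0;
  if (x > 0xFFFF) return 0xFFFFu;
  return (uint16_t)x;
}
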
// CHECK-LABEL: @test_vqmovn_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VQMOVN_V1_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqxtn.v8i8(<8 x i16> %a)
// CHECK: ret <8 x i8> [[VQMOVN_V1_I]]
int8x8_t test_vqmovn_s16(int16x8_t a) {
  return vqmovn_s16(a);
}

// CHECK-LABEL: @test_vqmovn_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VQMOVN_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> %a)
// CHECK: [[VQMOVN_V2_I:%.*]] = bitcast <4 x i16> [[VQMOVN_V1_I]] to <8 x i8>
// CHECK: ret <4 x i16> [[VQMOVN_V1_I]]
int16x4_t test_vqmovn_s32(int32x4_t a) {
  return vqmovn_s32(a);
}

// CHECK-LABEL: @test_vqmovn_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[VQMOVN_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqxtn.v2i32(<2 x i64> %a)
// CHECK: [[VQMOVN_V2_I:%.*]] = bitcast <2 x i32> [[VQMOVN_V1_I]] to <8 x i8>
// CHECK: ret <2 x i32> [[VQMOVN_V1_I]]
int32x2_t test_vqmovn_s64(int64x2_t a) {
  return vqmovn_s64(a);
}

// CHECK-LABEL: @test_vqmovn_high_s16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8>
// CHECK: [[VQMOVN_V1_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqxtn.v8i8(<8 x i16> %b)
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> [[VQMOVN_V1_I_I]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: ret <16 x i8> [[SHUFFLE_I_I]]
int8x16_t test_vqmovn_high_s16(int8x8_t a, int16x8_t b) {
  return vqmovn_high_s16(a, b);
}

// CHECK-LABEL: @test_vqmovn_high_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8>
// CHECK: [[VQMOVN_V1_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> %b)
// CHECK: [[VQMOVN_V2_I_I:%.*]] = bitcast <4 x i16> [[VQMOVN_V1_I_I]] to <8 x i8>
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> [[VQMOVN_V1_I_I]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: ret <8 x i16> [[SHUFFLE_I_I]]
int16x8_t test_vqmovn_high_s32(int16x4_t a, int32x4_t b) {
  return vqmovn_high_s32(a, b);
}

// CHECK-LABEL: @test_vqmovn_high_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %b to <16 x i8>
// CHECK: [[VQMOVN_V1_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqxtn.v2i32(<2 x i64> %b)
// CHECK: [[VQMOVN_V2_I_I:%.*]] = bitcast <2 x i32> [[VQMOVN_V1_I_I]] to <8 x i8>
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> [[VQMOVN_V1_I_I]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: ret <4 x i32> [[SHUFFLE_I_I]]
int32x4_t test_vqmovn_high_s64(int32x2_t a, int64x2_t b) {
  return vqmovn_high_s64(a, b);
}

// CHECK-LABEL: @test_vqmovn_u16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK: [[VQMOVN_V1_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqxtn.v8i8(<8 x i16> %a)
// CHECK: ret <8 x i8> [[VQMOVN_V1_I]]
uint8x8_t test_vqmovn_u16(uint16x8_t a) {
  return vqmovn_u16(a);
}

// CHECK-LABEL: @test_vqmovn_u32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VQMOVN_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> %a)
// CHECK: [[VQMOVN_V2_I:%.*]] = bitcast <4 x i16> [[VQMOVN_V1_I]] to <8 x i8>
// CHECK: ret <4 x i16> [[VQMOVN_V1_I]]
uint16x4_t test_vqmovn_u32(uint32x4_t a) {
  return vqmovn_u32(a);
}

// CHECK-LABEL: @test_vqmovn_u64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[VQMOVN_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64> %a)
// CHECK: [[VQMOVN_V2_I:%.*]] = bitcast <2 x i32> [[VQMOVN_V1_I]] to <8 x i8>
// CHECK: ret <2 x i32> [[VQMOVN_V1_I]]
uint32x2_t test_vqmovn_u64(uint64x2_t a) {
  return vqmovn_u64(a);
}

// CHECK-LABEL: @test_vqmovn_high_u16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8>
// CHECK: [[VQMOVN_V1_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqxtn.v8i8(<8 x i16> %b)
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> [[VQMOVN_V1_I_I]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: ret <16 x i8> [[SHUFFLE_I_I]]
uint8x16_t test_vqmovn_high_u16(uint8x8_t a, uint16x8_t b) {
  return vqmovn_high_u16(a, b);
}

// CHECK-LABEL: @test_vqmovn_high_u32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8>
// CHECK: [[VQMOVN_V1_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> %b)
// CHECK: [[VQMOVN_V2_I_I:%.*]] = bitcast <4 x i16> [[VQMOVN_V1_I_I]] to <8 x i8>
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> [[VQMOVN_V1_I_I]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: ret <8 x i16> [[SHUFFLE_I_I]]
uint16x8_t test_vqmovn_high_u32(uint16x4_t a, uint32x4_t b) {
  return vqmovn_high_u32(a, b);
}

// CHECK-LABEL: @test_vqmovn_high_u64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %b to <16 x i8>
// CHECK: [[VQMOVN_V1_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64> %b)
// CHECK: [[VQMOVN_V2_I_I:%.*]] = bitcast <2 x i32> [[VQMOVN_V1_I_I]] to <8 x i8>
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> [[VQMOVN_V1_I_I]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: ret <4 x i32> [[SHUFFLE_I_I]]
uint32x4_t test_vqmovn_high_u64(uint32x2_t a, uint64x2_t b) {
  return vqmovn_high_u64(a, b);
}

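// Illustrative sketch: unlike the wrapping vmovn* narrows earlier, the
// saturating uqxtn clamps each lane, so the same 0x0180 input pins at 0xFF.
static uint8x8_t narrow_sat_example(void) {
  uint16x8_t v = vdupq_n_u16(0x0180);
  return vqmovn_u16(v); // every lane saturates to 0xFF
}
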
// CHECK-LABEL: @test_vshll_n_s8(
// CHECK: [[TMP0:%.*]] = sext <8 x i8> %a to <8 x i16>
// CHECK: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], splat (i16 8)
// CHECK: ret <8 x i16> [[VSHLL_N]]
int16x8_t test_vshll_n_s8(int8x8_t a) {
  return vshll_n_s8(a, 8);
}

// CHECK-LABEL: @test_vshll_n_s16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK: [[TMP2:%.*]] = sext <4 x i16> [[TMP1]] to <4 x i32>
// CHECK: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], splat (i32 16)
// CHECK: ret <4 x i32> [[VSHLL_N]]
int32x4_t test_vshll_n_s16(int16x4_t a) {
  return vshll_n_s16(a, 16);
}

// CHECK-LABEL: @test_vshll_n_s32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK: [[TMP2:%.*]] = sext <2 x i32> [[TMP1]] to <2 x i64>
// CHECK: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], splat (i64 32)
// CHECK: ret <2 x i64> [[VSHLL_N]]
int64x2_t test_vshll_n_s32(int32x2_t a) {
  return vshll_n_s32(a, 32);
}

// CHECK-LABEL: @test_vshll_n_u8(
// CHECK: [[TMP0:%.*]] = zext <8 x i8> %a to <8 x i16>
// CHECK: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], splat (i16 8)
// CHECK: ret <8 x i16> [[VSHLL_N]]
uint16x8_t test_vshll_n_u8(uint8x8_t a) {
  return vshll_n_u8(a, 8);
}

// CHECK-LABEL: @test_vshll_n_u16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK: [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32>
// CHECK: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], splat (i32 16)
// CHECK: ret <4 x i32> [[VSHLL_N]]
uint32x4_t test_vshll_n_u16(uint16x4_t a) {
  return vshll_n_u16(a, 16);
}

// CHECK-LABEL: @test_vshll_n_u32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK: [[TMP2:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
// CHECK: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], splat (i64 32)
// CHECK: ret <2 x i64> [[VSHLL_N]]
uint64x2_t test_vshll_n_u32(uint32x2_t a) {
  return vshll_n_u32(a, 32);
}

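// Illustrative sketch: the tests above shift by exactly the source lane
// width, the one immediate for which the SHLL alias (widen, then shift by
// the full element size) is selected; smaller immediates emit SSHLL/USHLL.
static int16x8_t widen_and_scale_example(int8x8_t v) {
  return vshll_n_s8(v, 8); // sext each byte to i16, then shl by 8
}
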
// CHECK-LABEL: @test_vshll_high_n_s8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: [[TMP0:%.*]] = sext <8 x i8> [[SHUFFLE_I]] to <8 x i16>
// CHECK: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], splat (i16 8)
// CHECK: ret <8 x i16> [[VSHLL_N]]
int16x8_t test_vshll_high_n_s8(int8x16_t a) {
  return vshll_high_n_s8(a, 8);
}

// CHECK-LABEL: @test_vshll_high_n_s16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I]] to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK: [[TMP2:%.*]] = sext <4 x i16> [[TMP1]] to <4 x i32>
// CHECK: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], splat (i32 16)
// CHECK: ret <4 x i32> [[VSHLL_N]]
int32x4_t test_vshll_high_n_s16(int16x8_t a) {
  return vshll_high_n_s16(a, 16);
}

// CHECK-LABEL: @test_vshll_high_n_s32(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> <i32 2, i32 3>
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I]] to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK: [[TMP2:%.*]] = sext <2 x i32> [[TMP1]] to <2 x i64>
// CHECK: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], splat (i64 32)
// CHECK: ret <2 x i64> [[VSHLL_N]]
int64x2_t test_vshll_high_n_s32(int32x4_t a) {
  return vshll_high_n_s32(a, 32);
}

// CHECK-LABEL: @test_vshll_high_n_u8(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: [[TMP0:%.*]] = zext <8 x i8> [[SHUFFLE_I]] to <8 x i16>
// CHECK: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], splat (i16 8)
// CHECK: ret <8 x i16> [[VSHLL_N]]
uint16x8_t test_vshll_high_n_u8(uint8x16_t a) {
  return vshll_high_n_u8(a, 8);
}

// CHECK-LABEL: @test_vshll_high_n_u16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I]] to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK: [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32>
// CHECK: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], splat (i32 16)
// CHECK: ret <4 x i32> [[VSHLL_N]]
uint32x4_t test_vshll_high_n_u16(uint16x8_t a) {
  return vshll_high_n_u16(a, 16);
}

// CHECK-LABEL: @test_vshll_high_n_u32(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> <i32 2, i32 3>
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I]] to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK: [[TMP2:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
// CHECK: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], splat (i64 32)
// CHECK: ret <2 x i64> [[VSHLL_N]]
uint64x2_t test_vshll_high_n_u32(uint32x4_t a) {
  return vshll_high_n_u32(a, 32);
}

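// Illustrative sketch: the _high variants fold the vget_high step into the
// instruction, so the shufflevector of lanes 4..7 seen above replaces an
// explicit extract before the widening shift.
static int32x4_t widen_high_half_example(int16x8_t v) {
  return vshll_high_n_s16(v, 16); // widen lanes 4..7 and shift by 16
}
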
// CHECK-LABEL: @test_vcvt_f16_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[VCVT_F16_F321_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.vcvtfp2hf(<4 x float> %a)
// CHECK: [[VCVT_F16_F322_I:%.*]] = bitcast <4 x i16> [[VCVT_F16_F321_I]] to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[VCVT_F16_F322_I]] to <4 x half>
// CHECK: ret <4 x half> [[TMP1]]
float16x4_t test_vcvt_f16_f32(float32x4_t a) {
  return vcvt_f16_f32(a);
}

// CHECK-LABEL: @test_vcvt_high_f16_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %b to <16 x i8>
// CHECK: [[VCVT_F16_F321_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.vcvtfp2hf(<4 x float> %b)
// CHECK: [[VCVT_F16_F322_I_I:%.*]] = bitcast <4 x i16> [[VCVT_F16_F321_I_I]] to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[VCVT_F16_F322_I_I]] to <4 x half>
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x half> %a, <4 x half> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
// CHECK: ret <8 x half> [[SHUFFLE_I_I]]
float16x8_t test_vcvt_high_f16_f32(float16x4_t a, float32x4_t b) {
  return vcvt_high_f16_f32(a, b);
}

// CHECK-LABEL: @test_vcvt_f32_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VCVT_I:%.*]] = fptrunc <2 x double> %a to <2 x float>
// CHECK: ret <2 x float> [[VCVT_I]]
float32x2_t test_vcvt_f32_f64(float64x2_t a) {
  return vcvt_f32_f64(a);
}

// CHECK-LABEL: @test_vcvt_high_f32_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %b to <16 x i8>
// CHECK: [[VCVT_I_I:%.*]] = fptrunc <2 x double> %b to <2 x float>
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x float> %a, <2 x float> [[VCVT_I_I]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: ret <4 x float> [[SHUFFLE_I_I]]
float32x4_t test_vcvt_high_f32_f64(float32x2_t a, float64x2_t b) {
  return vcvt_high_f32_f64(a, b);
}

// CHECK-LABEL: @test_vcvtx_f32_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VCVTX_F32_V1_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double> %a)
// CHECK: ret <2 x float> [[VCVTX_F32_V1_I]]
float32x2_t test_vcvtx_f32_f64(float64x2_t a) {
  return vcvtx_f32_f64(a);
}

// CHECK-LABEL: @test_vcvtx_high_f32_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %b to <16 x i8>
// CHECK: [[VCVTX_F32_V1_I_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double> %b)
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x float> %a, <2 x float> [[VCVTX_F32_V1_I_I]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: ret <4 x float> [[SHUFFLE_I_I]]
float32x4_t test_vcvtx_high_f32_f64(float32x2_t a, float64x2_t b) {
  return vcvtx_high_f32_f64(a, b);
}

// CHECK-LABEL: @test_vcvt_f32_f16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[VCVT_F32_F16_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK: [[VCVT_F32_F161_I:%.*]] = call <4 x float> @llvm.aarch64.neon.vcvthf2fp(<4 x i16> [[VCVT_F32_F16_I]])
// CHECK: [[VCVT_F32_F162_I:%.*]] = bitcast <4 x float> [[VCVT_F32_F161_I]] to <16 x i8>
// CHECK: ret <4 x float> [[VCVT_F32_F161_I]]
float32x4_t test_vcvt_f32_f16(float16x4_t a) {
  return vcvt_f32_f16(a);
}

// CHECK-LABEL: @test_vcvt_high_f32_f16(
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %a, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> [[SHUFFLE_I_I]] to <8 x i8>
// CHECK: [[VCVT_F32_F16_I_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK: [[VCVT_F32_F161_I_I:%.*]] = call <4 x float> @llvm.aarch64.neon.vcvthf2fp(<4 x i16> [[VCVT_F32_F16_I_I]])
// CHECK: [[VCVT_F32_F162_I_I:%.*]] = bitcast <4 x float> [[VCVT_F32_F161_I_I]] to <16 x i8>
// CHECK: ret <4 x float> [[VCVT_F32_F161_I_I]]
float32x4_t test_vcvt_high_f32_f16(float16x8_t a) {
  return vcvt_high_f32_f16(a);
}

// CHECK-LABEL: @test_vcvt_f64_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[VCVT_I:%.*]] = fpext <2 x float> %a to <2 x double>
// CHECK: ret <2 x double> [[VCVT_I]]
float64x2_t test_vcvt_f64_f32(float32x2_t a) {
  return vcvt_f64_f32(a);
}

// CHECK-LABEL: @test_vcvt_high_f64_f32(
// CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %a, <2 x i32> <i32 2, i32 3>
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> [[SHUFFLE_I_I]] to <8 x i8>
// CHECK: [[VCVT_I_I:%.*]] = fpext <2 x float> [[SHUFFLE_I_I]] to <2 x double>
// CHECK: ret <2 x double> [[VCVT_I_I]]
float64x2_t test_vcvt_high_f64_f32(float32x4_t a) {
  return vcvt_high_f64_f32(a);
}

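// Illustrative sketch (helper name is ours): FCVTXN, the instruction behind
// vcvtx*, rounds to odd, which keeps a two-step f64 -> f32 -> f16 narrowing
// free of double-rounding errors.
static float16x4_t narrow_f64_to_f16_example(float64x2_t lo, float64x2_t hi) {
  float32x4_t f32 = vcvtx_high_f32_f64(vcvtx_f32_f64(lo), hi);
  return vcvt_f16_f32(f32);
}
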
// CHECK-LABEL: @test_vrndnq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VRNDN1_I:%.*]] = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %a)
// CHECK: ret <2 x double> [[VRNDN1_I]]
float64x2_t test_vrndnq_f64(float64x2_t a) {
  return vrndnq_f64(a);
}

// CHECK-LABEL: @test_vrndaq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VRNDA1_I:%.*]] = call <2 x double> @llvm.round.v2f64(<2 x double> %a)
// CHECK: ret <2 x double> [[VRNDA1_I]]
float64x2_t test_vrndaq_f64(float64x2_t a) {
  return vrndaq_f64(a);
}

// CHECK-LABEL: @test_vrndpq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VRNDP1_I:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> %a)
// CHECK: ret <2 x double> [[VRNDP1_I]]
float64x2_t test_vrndpq_f64(float64x2_t a) {
  return vrndpq_f64(a);
}

// CHECK-LABEL: @test_vrndmq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VRNDM1_I:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> %a)
// CHECK: ret <2 x double> [[VRNDM1_I]]
float64x2_t test_vrndmq_f64(float64x2_t a) {
  return vrndmq_f64(a);
}

// CHECK-LABEL: @test_vrndxq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VRNDX1_I:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> %a)
// CHECK: ret <2 x double> [[VRNDX1_I]]
float64x2_t test_vrndxq_f64(float64x2_t a) {
  return vrndxq_f64(a);
}

// CHECK-LABEL: @test_vrndq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VRNDZ1_I:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> %a)
// CHECK: ret <2 x double> [[VRNDZ1_I]]
float64x2_t test_vrndq_f64(float64x2_t a) {
  return vrndq_f64(a);
}

// CHECK-LABEL: @test_vrndiq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VRNDI1_I:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %a)
// CHECK: ret <2 x double> [[VRNDI1_I]]
float64x2_t test_vrndiq_f64(float64x2_t a) {
  return vrndiq_f64(a);
}

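// Illustrative sketch summarizing the mapping exercised above: each vrnd*
// variant selects a distinct LLVM rounding intrinsic.
static float64x2_t rounding_examples(float64x2_t v) {
  float64x2_t n = vrndnq_f64(v); // llvm.roundeven: ties to even
  float64x2_t a = vrndaq_f64(v); // llvm.round: ties away from zero
  float64x2_t p = vrndpq_f64(v); // llvm.ceil: toward +inf
  float64x2_t m = vrndmq_f64(v); // llvm.floor: toward -inf
  float64x2_t x = vrndxq_f64(v); // llvm.rint: current mode, raises inexact
  float64x2_t z = vrndq_f64(v);  // llvm.trunc: toward zero
  float64x2_t i = vrndiq_f64(v); // llvm.nearbyint: current mode, quiet
  return vaddq_f64(vaddq_f64(vaddq_f64(n, a), vaddq_f64(p, m)),
                   vaddq_f64(vaddq_f64(x, z), i));
}
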
// CHECK-LABEL: @test_vcvt_s32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtzs.v2i32.v2f32(<2 x float> %a)
// CHECK: ret <2 x i32> [[TMP1]]
int32x2_t test_vcvt_s32_f32(float32x2_t a) {
  return vcvt_s32_f32(a);
}

// CHECK-LABEL: @test_vcvtq_s32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtzs.v4i32.v4f32(<4 x float> %a)
// CHECK: ret <4 x i32> [[TMP1]]
int32x4_t test_vcvtq_s32_f32(float32x4_t a) {
  return vcvtq_s32_f32(a);
}

// CHECK-LABEL: @test_vcvtq_s64_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtzs.v2i64.v2f64(<2 x double> %a)
// CHECK: ret <2 x i64> [[TMP1]]
int64x2_t test_vcvtq_s64_f64(float64x2_t a) {
  return vcvtq_s64_f64(a);
}

// CHECK-LABEL: @test_vcvt_u32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtzu.v2i32.v2f32(<2 x float> %a)
// CHECK: ret <2 x i32> [[TMP1]]
uint32x2_t test_vcvt_u32_f32(float32x2_t a) {
  return vcvt_u32_f32(a);
}

// CHECK-LABEL: @test_vcvtq_u32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtzu.v4i32.v4f32(<4 x float> %a)
// CHECK: ret <4 x i32> [[TMP1]]
uint32x4_t test_vcvtq_u32_f32(float32x4_t a) {
  return vcvtq_u32_f32(a);
}

// CHECK-LABEL: @test_vcvtq_u64_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtzu.v2i64.v2f64(<2 x double> %a)
// CHECK: ret <2 x i64> [[TMP1]]
uint64x2_t test_vcvtq_u64_f64(float64x2_t a) {
  return vcvtq_u64_f64(a);
}

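// Illustrative sketch: the unsuffixed float-to-int conversions use
// fcvtzs/fcvtzu, i.e. they truncate toward zero.
static int32x2_t float_to_int_trunc_example(void) {
  float32x2_t v = {-1.7f, 2.9f};
  return vcvt_s32_f32(v); // yields {-1, 2}
}
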
// CHECK-LABEL: @test_vcvtn_s32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[VCVTN1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtns.v2i32.v2f32(<2 x float> %a)
// CHECK: ret <2 x i32> [[VCVTN1_I]]
int32x2_t test_vcvtn_s32_f32(float32x2_t a) {
  return vcvtn_s32_f32(a);
}

// CHECK-LABEL: @test_vcvtnq_s32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[VCVTN1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtns.v4i32.v4f32(<4 x float> %a)
// CHECK: ret <4 x i32> [[VCVTN1_I]]
int32x4_t test_vcvtnq_s32_f32(float32x4_t a) {
  return vcvtnq_s32_f32(a);
}

// CHECK-LABEL: @test_vcvtnq_s64_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VCVTN1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtns.v2i64.v2f64(<2 x double> %a)
// CHECK: ret <2 x i64> [[VCVTN1_I]]
int64x2_t test_vcvtnq_s64_f64(float64x2_t a) {
  return vcvtnq_s64_f64(a);
}

// CHECK-LABEL: @test_vcvtn_u32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[VCVTN1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtnu.v2i32.v2f32(<2 x float> %a)
// CHECK: ret <2 x i32> [[VCVTN1_I]]
uint32x2_t test_vcvtn_u32_f32(float32x2_t a) {
  return vcvtn_u32_f32(a);
}

// CHECK-LABEL: @test_vcvtnq_u32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[VCVTN1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtnu.v4i32.v4f32(<4 x float> %a)
// CHECK: ret <4 x i32> [[VCVTN1_I]]
uint32x4_t test_vcvtnq_u32_f32(float32x4_t a) {
  return vcvtnq_u32_f32(a);
}

// CHECK-LABEL: @test_vcvtnq_u64_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VCVTN1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtnu.v2i64.v2f64(<2 x double> %a)
// CHECK: ret <2 x i64> [[VCVTN1_I]]
uint64x2_t test_vcvtnq_u64_f64(float64x2_t a) {
  return vcvtnq_u64_f64(a);
}

// CHECK-LABEL: @test_vcvtp_s32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[VCVTP1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtps.v2i32.v2f32(<2 x float> %a)
// CHECK: ret <2 x i32> [[VCVTP1_I]]
int32x2_t test_vcvtp_s32_f32(float32x2_t a) {
  return vcvtp_s32_f32(a);
}

// CHECK-LABEL: @test_vcvtpq_s32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[VCVTP1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtps.v4i32.v4f32(<4 x float> %a)
// CHECK: ret <4 x i32> [[VCVTP1_I]]
int32x4_t test_vcvtpq_s32_f32(float32x4_t a) {
  return vcvtpq_s32_f32(a);
}

// CHECK-LABEL: @test_vcvtpq_s64_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VCVTP1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double> %a)
// CHECK: ret <2 x i64> [[VCVTP1_I]]
int64x2_t test_vcvtpq_s64_f64(float64x2_t a) {
  return vcvtpq_s64_f64(a);
}

// CHECK-LABEL: @test_vcvtp_u32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[VCVTP1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtpu.v2i32.v2f32(<2 x float> %a)
// CHECK: ret <2 x i32> [[VCVTP1_I]]
uint32x2_t test_vcvtp_u32_f32(float32x2_t a) {
  return vcvtp_u32_f32(a);
}

// CHECK-LABEL: @test_vcvtpq_u32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[VCVTP1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtpu.v4i32.v4f32(<4 x float> %a)
// CHECK: ret <4 x i32> [[VCVTP1_I]]
uint32x4_t test_vcvtpq_u32_f32(float32x4_t a) {
  return vcvtpq_u32_f32(a);
}

// CHECK-LABEL: @test_vcvtpq_u64_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VCVTP1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtpu.v2i64.v2f64(<2 x double> %a)
// CHECK: ret <2 x i64> [[VCVTP1_I]]
uint64x2_t test_vcvtpq_u64_f64(float64x2_t a) {
  return vcvtpq_u64_f64(a);
}

// CHECK-LABEL: @test_vcvtm_s32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[VCVTM1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtms.v2i32.v2f32(<2 x float> %a)
// CHECK: ret <2 x i32> [[VCVTM1_I]]
int32x2_t test_vcvtm_s32_f32(float32x2_t a) {
  return vcvtm_s32_f32(a);
}

// CHECK-LABEL: @test_vcvtmq_s32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[VCVTM1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtms.v4i32.v4f32(<4 x float> %a)
// CHECK: ret <4 x i32> [[VCVTM1_I]]
int32x4_t test_vcvtmq_s32_f32(float32x4_t a) {
  return vcvtmq_s32_f32(a);
}

// CHECK-LABEL: @test_vcvtmq_s64_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VCVTM1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double> %a)
// CHECK: ret <2 x i64> [[VCVTM1_I]]
int64x2_t test_vcvtmq_s64_f64(float64x2_t a) {
  return vcvtmq_s64_f64(a);
}

// CHECK-LABEL: @test_vcvtm_u32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[VCVTM1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtmu.v2i32.v2f32(<2 x float> %a)
// CHECK: ret <2 x i32> [[VCVTM1_I]]
uint32x2_t test_vcvtm_u32_f32(float32x2_t a) {
  return vcvtm_u32_f32(a);
}

// CHECK-LABEL: @test_vcvtmq_u32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[VCVTM1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtmu.v4i32.v4f32(<4 x float> %a)
// CHECK: ret <4 x i32> [[VCVTM1_I]]
uint32x4_t test_vcvtmq_u32_f32(float32x4_t a) {
  return vcvtmq_u32_f32(a);
}

// CHECK-LABEL: @test_vcvtmq_u64_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VCVTM1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double> %a)
// CHECK: ret <2 x i64> [[VCVTM1_I]]
uint64x2_t test_vcvtmq_u64_f64(float64x2_t a) {
  return vcvtmq_u64_f64(a);
}

// CHECK-LABEL: @test_vcvta_s32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[VCVTA1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtas.v2i32.v2f32(<2 x float> %a)
// CHECK: ret <2 x i32> [[VCVTA1_I]]
int32x2_t test_vcvta_s32_f32(float32x2_t a) {
  return vcvta_s32_f32(a);
}

// CHECK-LABEL: @test_vcvtaq_s32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[VCVTA1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtas.v4i32.v4f32(<4 x float> %a)
// CHECK: ret <4 x i32> [[VCVTA1_I]]
int32x4_t test_vcvtaq_s32_f32(float32x4_t a) {
  return vcvtaq_s32_f32(a);
}

// CHECK-LABEL: @test_vcvtaq_s64_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VCVTA1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double> %a)
// CHECK: ret <2 x i64> [[VCVTA1_I]]
int64x2_t test_vcvtaq_s64_f64(float64x2_t a) {
  return vcvtaq_s64_f64(a);
}

// CHECK-LABEL: @test_vcvta_u32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[VCVTA1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtau.v2i32.v2f32(<2 x float> %a)
// CHECK: ret <2 x i32> [[VCVTA1_I]]
uint32x2_t test_vcvta_u32_f32(float32x2_t a) {
  return vcvta_u32_f32(a);
}

// CHECK-LABEL: @test_vcvtaq_u32_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[VCVTA1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtau.v4i32.v4f32(<4 x float> %a)
// CHECK: ret <4 x i32> [[VCVTA1_I]]
uint32x4_t test_vcvtaq_u32_f32(float32x4_t a) {
  return vcvtaq_u32_f32(a);
}

// CHECK-LABEL: @test_vcvtaq_u64_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VCVTA1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double> %a)
// CHECK: ret <2 x i64> [[VCVTA1_I]]
uint64x2_t test_vcvtaq_u64_f64(float64x2_t a) {
  return vcvtaq_u64_f64(a);
}

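// Illustrative sketch: the suffixed conversions bake the rounding mode into
// the instruction instead of truncating: n = to nearest (ties to even),
// p = toward +inf, m = toward -inf, a = to nearest (ties away from zero).
static void cvt_rounding_modes_example(float32x2_t v, int32x2_t out[4]) {
  out[0] = vcvtn_s32_f32(v); // fcvtns
  out[1] = vcvtp_s32_f32(v); // fcvtps
  out[2] = vcvtm_s32_f32(v); // fcvtms
  out[3] = vcvta_s32_f32(v); // fcvtas
}
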
// CHECK-LABEL: @test_vrsqrte_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[VRSQRTE_V1_I:%.*]] = call <2 x float> @llvm.aarch64.neon.frsqrte.v2f32(<2 x float> %a)
// CHECK: ret <2 x float> [[VRSQRTE_V1_I]]
float32x2_t test_vrsqrte_f32(float32x2_t a) {
  return vrsqrte_f32(a);
}

// CHECK-LABEL: @test_vrsqrteq_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[VRSQRTEQ_V1_I:%.*]] = call <4 x float> @llvm.aarch64.neon.frsqrte.v4f32(<4 x float> %a)
// CHECK: ret <4 x float> [[VRSQRTEQ_V1_I]]
float32x4_t test_vrsqrteq_f32(float32x4_t a) {
  return vrsqrteq_f32(a);
}

// CHECK-LABEL: @test_vrsqrteq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VRSQRTEQ_V1_I:%.*]] = call <2 x double> @llvm.aarch64.neon.frsqrte.v2f64(<2 x double> %a)
// CHECK: ret <2 x double> [[VRSQRTEQ_V1_I]]
float64x2_t test_vrsqrteq_f64(float64x2_t a) {
  return vrsqrteq_f64(a);
}

// CHECK-LABEL: @test_vrecpe_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[VRECPE_V1_I:%.*]] = call <2 x float> @llvm.aarch64.neon.frecpe.v2f32(<2 x float> %a)
// CHECK: ret <2 x float> [[VRECPE_V1_I]]
float32x2_t test_vrecpe_f32(float32x2_t a) {
  return vrecpe_f32(a);
}

// CHECK-LABEL: @test_vrecpeq_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[VRECPEQ_V1_I:%.*]] = call <4 x float> @llvm.aarch64.neon.frecpe.v4f32(<4 x float> %a)
// CHECK: ret <4 x float> [[VRECPEQ_V1_I]]
float32x4_t test_vrecpeq_f32(float32x4_t a) {
  return vrecpeq_f32(a);
}

// CHECK-LABEL: @test_vrecpeq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VRECPEQ_V1_I:%.*]] = call <2 x double> @llvm.aarch64.neon.frecpe.v2f64(<2 x double> %a)
// CHECK: ret <2 x double> [[VRECPEQ_V1_I]]
float64x2_t test_vrecpeq_f64(float64x2_t a) {
  return vrecpeq_f64(a);
}

// CHECK-LABEL: @test_vrecpe_u32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[VRECPE_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.urecpe.v2i32(<2 x i32> %a)
// CHECK: ret <2 x i32> [[VRECPE_V1_I]]
uint32x2_t test_vrecpe_u32(uint32x2_t a) {
  return vrecpe_u32(a);
}

// CHECK-LABEL: @test_vrecpeq_u32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VRECPEQ_V1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.urecpe.v4i32(<4 x i32> %a)
// CHECK: ret <4 x i32> [[VRECPEQ_V1_I]]
uint32x4_t test_vrecpeq_u32(uint32x4_t a) {
  return vrecpeq_u32(a);
}

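// Illustrative sketch (vrecps_f32 is the standard ACLE refinement step, not
// covered by this file's checks): one Newton-Raphson iteration on top of the
// vrecpe_f32 estimate roughly doubles its precision.
static float32x2_t reciprocal_refined_example(float32x2_t a) {
  float32x2_t est = vrecpe_f32(a);         // coarse 1/a estimate
  est = vmul_f32(est, vrecps_f32(a, est)); // est *= (2 - a * est)
  return est;
}
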
// CHECK-LABEL: @test_vsqrt_f32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK: [[VSQRT_I:%.*]] = call <2 x float> @llvm.sqrt.v2f32(<2 x float> %a)
// CHECK: ret <2 x float> [[VSQRT_I]]
float32x2_t test_vsqrt_f32(float32x2_t a) {
  return vsqrt_f32(a);
}

// CHECK-LABEL: @test_vsqrtq_f32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK: [[VSQRT_I:%.*]] = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %a)
// CHECK: ret <4 x float> [[VSQRT_I]]
float32x4_t test_vsqrtq_f32(float32x4_t a) {
  return vsqrtq_f32(a);
}

// CHECK-LABEL: @test_vsqrtq_f64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK: [[VSQRT_I:%.*]] = call <2 x double> @llvm.sqrt.v2f64(<2 x double> %a)
// CHECK: ret <2 x double> [[VSQRT_I]]
float64x2_t test_vsqrtq_f64(float64x2_t a) {
  return vsqrtq_f64(a);
}

// CHECK-LABEL: @test_vcvt_f32_s32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[VCVT_I:%.*]] = sitofp <2 x i32> %a to <2 x float>
// CHECK: ret <2 x float> [[VCVT_I]]
float32x2_t test_vcvt_f32_s32(int32x2_t a) {
  return vcvt_f32_s32(a);
}

// CHECK-LABEL: @test_vcvt_f32_u32(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK: [[VCVT_I:%.*]] = uitofp <2 x i32> %a to <2 x float>
// CHECK: ret <2 x float> [[VCVT_I]]
float32x2_t test_vcvt_f32_u32(uint32x2_t a) {
  return vcvt_f32_u32(a);
}

// CHECK-LABEL: @test_vcvtq_f32_s32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VCVT_I:%.*]] = sitofp <4 x i32> %a to <4 x float>
// CHECK: ret <4 x float> [[VCVT_I]]
float32x4_t test_vcvtq_f32_s32(int32x4_t a) {
  return vcvtq_f32_s32(a);
}

// CHECK-LABEL: @test_vcvtq_f32_u32(
// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK: [[VCVT_I:%.*]] = uitofp <4 x i32> %a to <4 x float>
// CHECK: ret <4 x float> [[VCVT_I]]
float32x4_t test_vcvtq_f32_u32(uint32x4_t a) {
  return vcvtq_f32_u32(a);
}

// CHECK-LABEL: @test_vcvtq_f64_s64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[VCVT_I:%.*]] = sitofp <2 x i64> %a to <2 x double>
// CHECK: ret <2 x double> [[VCVT_I]]
float64x2_t test_vcvtq_f64_s64(int64x2_t a) {
  return vcvtq_f64_s64(a);
}

// CHECK-LABEL: @test_vcvtq_f64_u64(
// CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK: [[VCVT_I:%.*]] = uitofp <2 x i64> %a to <2 x double>
// CHECK: ret <2 x double> [[VCVT_I]]
float64x2_t test_vcvtq_f64_u64(uint64x2_t a) {
  return vcvtq_f64_u64(a);
}

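// Illustrative sketch: sitofp/uitofp on 32-bit lanes are exact only up to
// 2^24, the width of a float significand; larger magnitudes round.
static float32x4_t int_to_float_example(void) {
  int32x4_t v = {1, -2, 1 << 24, (1 << 24) + 1};
  return vcvtq_f32_s32(v); // last lane rounds to 16777216.0f
}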