// clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics.c
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +fullfp16 \
// RUN: -disable-O0-optnone -emit-llvm -o - %s \
// RUN: | opt -S -passes=mem2reg \
// RUN: | FileCheck %s

// REQUIRES: aarch64-registered-target

#include <arm_fp16.h>
10 // CHECK-LABEL: test_vabsh_f16
11 // CHECK: [[ABS:%.*]] = call half @llvm.fabs.f16(half %a)
12 // CHECK: ret half [[ABS]]
13 float16_t test_vabsh_f16(float16_t a) {
14 return vabsh_f16(a);
17 // CHECK-LABEL: test_vceqzh_f16
18 // CHECK: [[TMP1:%.*]] = fcmp oeq half %a, 0xH0000
19 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
20 // CHECK: ret i16 [[TMP2]]
21 uint16_t test_vceqzh_f16(float16_t a) {
22 return vceqzh_f16(a);
25 // CHECK-LABEL: test_vcgezh_f16
26 // CHECK: [[TMP1:%.*]] = fcmp oge half %a, 0xH0000
27 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
28 // CHECK: ret i16 [[TMP2]]
29 uint16_t test_vcgezh_f16(float16_t a) {
30 return vcgezh_f16(a);
33 // CHECK-LABEL: test_vcgtzh_f16
34 // CHECK: [[TMP1:%.*]] = fcmp ogt half %a, 0xH0000
35 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
36 // CHECK: ret i16 [[TMP2]]
37 uint16_t test_vcgtzh_f16(float16_t a) {
38 return vcgtzh_f16(a);
41 // CHECK-LABEL: test_vclezh_f16
42 // CHECK: [[TMP1:%.*]] = fcmp ole half %a, 0xH0000
43 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
44 // CHECK: ret i16 [[TMP2]]
45 uint16_t test_vclezh_f16(float16_t a) {
46 return vclezh_f16(a);
49 // CHECK-LABEL: test_vcltzh_f16
50 // CHECK: [[TMP1:%.*]] = fcmp olt half %a, 0xH0000
51 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
52 // CHECK: ret i16 [[TMP2]]
53 uint16_t test_vcltzh_f16(float16_t a) {
54 return vcltzh_f16(a);
57 // CHECK-LABEL: test_vcvth_f16_s16
58 // CHECK: [[VCVT:%.*]] = sitofp i16 %a to half
59 // CHECK: ret half [[VCVT]]
60 float16_t test_vcvth_f16_s16 (int16_t a) {
61 return vcvth_f16_s16(a);
64 // CHECK-LABEL: test_vcvth_f16_s32
65 // CHECK: [[VCVT:%.*]] = sitofp i32 %a to half
66 // CHECK: ret half [[VCVT]]
67 float16_t test_vcvth_f16_s32 (int32_t a) {
68 return vcvth_f16_s32(a);
71 // CHECK-LABEL: test_vcvth_f16_s64
72 // CHECK: [[VCVT:%.*]] = sitofp i64 %a to half
73 // CHECK: ret half [[VCVT]]
74 float16_t test_vcvth_f16_s64 (int64_t a) {
75 return vcvth_f16_s64(a);
78 // CHECK-LABEL: test_vcvth_f16_u16
79 // CHECK: [[VCVT:%.*]] = uitofp i16 %a to half
80 // CHECK: ret half [[VCVT]]
81 float16_t test_vcvth_f16_u16 (uint16_t a) {
82 return vcvth_f16_u16(a);
85 // CHECK-LABEL: test_vcvth_f16_u32
86 // CHECK: [[VCVT:%.*]] = uitofp i32 %a to half
87 // CHECK: ret half [[VCVT]]
88 float16_t test_vcvth_f16_u32 (uint32_t a) {
89 return vcvth_f16_u32(a);
92 // CHECK-LABEL: test_vcvth_f16_u64
93 // CHECK: [[VCVT:%.*]] = uitofp i64 %a to half
94 // CHECK: ret half [[VCVT]]
95 float16_t test_vcvth_f16_u64 (uint64_t a) {
96 return vcvth_f16_u64(a);
99 // CHECK-LABEL: test_vcvth_s16_f16
100 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtzs.i32.f16(half %a)
101 // CHECK: [[TRUNC:%.*]] = trunc i32 [[VCVT]] to i16
102 // CHECK: ret i16 [[TRUNC]]
103 int16_t test_vcvth_s16_f16 (float16_t a) {
104 return vcvth_s16_f16(a);
107 // CHECK-LABEL: test_vcvth_s32_f16
108 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtzs.i32.f16(half %a)
109 // CHECK: ret i32 [[VCVT]]
110 int32_t test_vcvth_s32_f16 (float16_t a) {
111 return vcvth_s32_f16(a);
114 // CHECK-LABEL: test_vcvth_s64_f16
115 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtzs.i64.f16(half %a)
116 // CHECK: ret i64 [[VCVT]]
117 int64_t test_vcvth_s64_f16 (float16_t a) {
118 return vcvth_s64_f16(a);
121 // CHECK-LABEL: test_vcvth_u16_f16
122 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtzu.i32.f16(half %a)
123 // CHECK: [[TRUNC:%.*]] = trunc i32 [[VCVT]] to i16
124 // CHECK: ret i16 [[TRUNC]]
125 uint16_t test_vcvth_u16_f16 (float16_t a) {
126 return vcvth_u16_f16(a);
129 // CHECK-LABEL: test_vcvth_u32_f16
130 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtzu.i32.f16(half %a)
131 // CHECK: ret i32 [[VCVT]]
132 uint32_t test_vcvth_u32_f16 (float16_t a) {
133 return vcvth_u32_f16(a);
136 // CHECK-LABEL: test_vcvth_u64_f16
137 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtzu.i64.f16(half %a)
138 // CHECK: ret i64 [[VCVT]]
139 uint64_t test_vcvth_u64_f16 (float16_t a) {
140 return vcvth_u64_f16(a);
143 // CHECK-LABEL: test_vcvtah_s16_f16
144 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtas.i32.f16(half %a)
145 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
146 // CHECK: ret i16 [[RET]]
147 int16_t test_vcvtah_s16_f16 (float16_t a) {
148 return vcvtah_s16_f16(a);
151 // CHECK-LABEL: test_vcvtah_s32_f16
152 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtas.i32.f16(half %a)
153 // CHECK: ret i32 [[VCVT]]
154 int32_t test_vcvtah_s32_f16 (float16_t a) {
155 return vcvtah_s32_f16(a);
158 // CHECK-LABEL: test_vcvtah_s64_f16
159 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtas.i64.f16(half %a)
160 // CHECK: ret i64 [[VCVT]]
161 int64_t test_vcvtah_s64_f16 (float16_t a) {
162 return vcvtah_s64_f16(a);
165 // CHECK-LABEL: test_vcvtah_u16_f16
166 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtau.i32.f16(half %a)
167 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
168 // CHECK: ret i16 [[RET]]
169 uint16_t test_vcvtah_u16_f16 (float16_t a) {
170 return vcvtah_u16_f16(a);
173 // CHECK-LABEL: test_vcvtah_u32_f16
174 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtau.i32.f16(half %a)
175 // CHECK: ret i32 [[VCVT]]
176 uint32_t test_vcvtah_u32_f16 (float16_t a) {
177 return vcvtah_u32_f16(a);
180 // CHECK-LABEL: test_vcvtah_u64_f16
181 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtau.i64.f16(half %a)
182 // CHECK: ret i64 [[VCVT]]
183 uint64_t test_vcvtah_u64_f16 (float16_t a) {
184 return vcvtah_u64_f16(a);
187 // CHECK-LABEL: test_vcvtmh_s16_f16
188 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtms.i32.f16(half %a)
189 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
190 // CHECK: ret i16 [[RET]]
191 int16_t test_vcvtmh_s16_f16 (float16_t a) {
192 return vcvtmh_s16_f16(a);
195 // CHECK-LABEL: test_vcvtmh_s32_f16
196 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtms.i32.f16(half %a)
197 // CHECK: ret i32 [[VCVT]]
198 int32_t test_vcvtmh_s32_f16 (float16_t a) {
199 return vcvtmh_s32_f16(a);
202 // CHECK-LABEL: test_vcvtmh_s64_f16
203 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtms.i64.f16(half %a)
204 // CHECK: ret i64 [[VCVT]]
205 int64_t test_vcvtmh_s64_f16 (float16_t a) {
206 return vcvtmh_s64_f16(a);
209 // CHECK-LABEL: test_vcvtmh_u16_f16
210 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtmu.i32.f16(half %a)
211 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
212 // CHECK: ret i16 [[RET]]
213 uint16_t test_vcvtmh_u16_f16 (float16_t a) {
214 return vcvtmh_u16_f16(a);
217 // CHECK-LABEL: test_vcvtmh_u32_f16
218 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtmu.i32.f16(half %a)
219 // CHECK: ret i32 [[VCVT]]
220 uint32_t test_vcvtmh_u32_f16 (float16_t a) {
221 return vcvtmh_u32_f16(a);
224 // CHECK-LABEL: test_vcvtmh_u64_f16
225 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtmu.i64.f16(half %a)
226 // CHECK: ret i64 [[VCVT]]
227 uint64_t test_vcvtmh_u64_f16 (float16_t a) {
228 return vcvtmh_u64_f16(a);
231 // CHECK-LABEL: test_vcvtnh_s16_f16
232 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtns.i32.f16(half %a)
233 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
234 // CHECK: ret i16 [[RET]]
235 int16_t test_vcvtnh_s16_f16 (float16_t a) {
236 return vcvtnh_s16_f16(a);
239 // CHECK-LABEL: test_vcvtnh_s32_f16
240 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtns.i32.f16(half %a)
241 // CHECK: ret i32 [[VCVT]]
242 int32_t test_vcvtnh_s32_f16 (float16_t a) {
243 return vcvtnh_s32_f16(a);
246 // CHECK-LABEL: test_vcvtnh_s64_f16
247 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtns.i64.f16(half %a)
248 // CHECK: ret i64 [[VCVT]]
249 int64_t test_vcvtnh_s64_f16 (float16_t a) {
250 return vcvtnh_s64_f16(a);
253 // CHECK-LABEL: test_vcvtnh_u16_f16
254 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtnu.i32.f16(half %a)
255 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
256 // CHECK: ret i16 [[RET]]
257 uint16_t test_vcvtnh_u16_f16 (float16_t a) {
258 return vcvtnh_u16_f16(a);
261 // CHECK-LABEL: test_vcvtnh_u32_f16
262 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtnu.i32.f16(half %a)
263 // CHECK: ret i32 [[VCVT]]
264 uint32_t test_vcvtnh_u32_f16 (float16_t a) {
265 return vcvtnh_u32_f16(a);
268 // CHECK-LABEL: test_vcvtnh_u64_f16
269 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtnu.i64.f16(half %a)
270 // CHECK: ret i64 [[VCVT]]
271 uint64_t test_vcvtnh_u64_f16 (float16_t a) {
272 return vcvtnh_u64_f16(a);
275 // CHECK-LABEL: test_vcvtph_s16_f16
276 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtps.i32.f16(half %a)
277 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
278 // CHECK: ret i16 [[RET]]
279 int16_t test_vcvtph_s16_f16 (float16_t a) {
280 return vcvtph_s16_f16(a);
283 // CHECK-LABEL: test_vcvtph_s32_f16
284 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtps.i32.f16(half %a)
285 // CHECK: ret i32 [[VCVT]]
286 int32_t test_vcvtph_s32_f16 (float16_t a) {
287 return vcvtph_s32_f16(a);
290 // CHECK-LABEL: test_vcvtph_s64_f16
291 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtps.i64.f16(half %a)
292 // CHECK: ret i64 [[VCVT]]
293 int64_t test_vcvtph_s64_f16 (float16_t a) {
294 return vcvtph_s64_f16(a);
297 // CHECK-LABEL: test_vcvtph_u16_f16
298 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtpu.i32.f16(half %a)
299 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
300 // CHECK: ret i16 [[RET]]
301 uint16_t test_vcvtph_u16_f16 (float16_t a) {
302 return vcvtph_u16_f16(a);
305 // CHECK-LABEL: test_vcvtph_u32_f16
306 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtpu.i32.f16(half %a)
307 // CHECK: ret i32 [[VCVT]]
308 uint32_t test_vcvtph_u32_f16 (float16_t a) {
309 return vcvtph_u32_f16(a);
312 // CHECK-LABEL: test_vcvtph_u64_f16
313 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtpu.i64.f16(half %a)
314 // CHECK: ret i64 [[VCVT]]
315 uint64_t test_vcvtph_u64_f16 (float16_t a) {
316 return vcvtph_u64_f16(a);
319 // CHECK-LABEL: test_vnegh_f16
320 // CHECK: [[NEG:%.*]] = fneg half %a
321 // CHECK: ret half [[NEG]]
322 float16_t test_vnegh_f16(float16_t a) {
323 return vnegh_f16(a);
326 // CHECK-LABEL: test_vrecpeh_f16
327 // CHECK: [[VREC:%.*]] = call half @llvm.aarch64.neon.frecpe.f16(half %a)
328 // CHECK: ret half [[VREC]]
329 float16_t test_vrecpeh_f16(float16_t a) {
330 return vrecpeh_f16(a);
333 // CHECK-LABEL: test_vrecpxh_f16
334 // CHECK: [[VREC:%.*]] = call half @llvm.aarch64.neon.frecpx.f16(half %a)
335 // CHECK: ret half [[VREC]]
336 float16_t test_vrecpxh_f16(float16_t a) {
337 return vrecpxh_f16(a);
340 // CHECK-LABEL: test_vrndh_f16
341 // CHECK: [[RND:%.*]] = call half @llvm.trunc.f16(half %a)
342 // CHECK: ret half [[RND]]
343 float16_t test_vrndh_f16(float16_t a) {
344 return vrndh_f16(a);
347 // CHECK-LABEL: test_vrndah_f16
348 // CHECK: [[RND:%.*]] = call half @llvm.round.f16(half %a)
349 // CHECK: ret half [[RND]]
350 float16_t test_vrndah_f16(float16_t a) {
351 return vrndah_f16(a);
354 // CHECK-LABEL: test_vrndih_f16
355 // CHECK: [[RND:%.*]] = call half @llvm.nearbyint.f16(half %a)
356 // CHECK: ret half [[RND]]
357 float16_t test_vrndih_f16(float16_t a) {
358 return vrndih_f16(a);
361 // CHECK-LABEL: test_vrndmh_f16
362 // CHECK: [[RND:%.*]] = call half @llvm.floor.f16(half %a)
363 // CHECK: ret half [[RND]]
364 float16_t test_vrndmh_f16(float16_t a) {
365 return vrndmh_f16(a);
368 // CHECK-LABEL: test_vrndnh_f16
369 // CHECK: [[RND:%.*]] = call half @llvm.roundeven.f16(half %a)
370 // CHECK: ret half [[RND]]
371 float16_t test_vrndnh_f16(float16_t a) {
372 return vrndnh_f16(a);
375 // CHECK-LABEL: test_vrndph_f16
376 // CHECK: [[RND:%.*]] = call half @llvm.ceil.f16(half %a)
377 // CHECK: ret half [[RND]]
378 float16_t test_vrndph_f16(float16_t a) {
379 return vrndph_f16(a);
382 // CHECK-LABEL: test_vrndxh_f16
383 // CHECK: [[RND:%.*]] = call half @llvm.rint.f16(half %a)
384 // CHECK: ret half [[RND]]
385 float16_t test_vrndxh_f16(float16_t a) {
386 return vrndxh_f16(a);
389 // CHECK-LABEL: test_vrsqrteh_f16
390 // CHECK: [[RND:%.*]] = call half @llvm.aarch64.neon.frsqrte.f16(half %a)
391 // CHECK: ret half [[RND]]
392 float16_t test_vrsqrteh_f16(float16_t a) {
393 return vrsqrteh_f16(a);
396 // CHECK-LABEL: test_vsqrth_f16
397 // CHECK: [[SQR:%.*]] = call half @llvm.sqrt.f16(half %a)
398 // CHECK: ret half [[SQR]]
399 float16_t test_vsqrth_f16(float16_t a) {
400 return vsqrth_f16(a);
403 // CHECK-LABEL: test_vaddh_f16
404 // CHECK: [[ADD:%.*]] = fadd half %a, %b
405 // CHECK: ret half [[ADD]]
406 float16_t test_vaddh_f16(float16_t a, float16_t b) {
407 return vaddh_f16(a, b);
410 // CHECK-LABEL: test_vabdh_f16
411 // CHECK: [[ABD:%.*]] = call half @llvm.aarch64.sisd.fabd.f16(half %a, half %b)
412 // CHECK: ret half [[ABD]]
413 float16_t test_vabdh_f16(float16_t a, float16_t b) {
414 return vabdh_f16(a, b);
417 // CHECK-LABEL: test_vcageh_f16
418 // CHECK: [[FACG:%.*]] = call i32 @llvm.aarch64.neon.facge.i32.f16(half %a, half %b)
419 // CHECK: [[RET:%.*]] = trunc i32 [[FACG]] to i16
420 // CHECK: ret i16 [[RET]]
421 uint16_t test_vcageh_f16(float16_t a, float16_t b) {
422 return vcageh_f16(a, b);
425 // CHECK-LABEL: test_vcagth_f16
426 // CHECK: [[FACG:%.*]] = call i32 @llvm.aarch64.neon.facgt.i32.f16(half %a, half %b)
427 // CHECK: [[RET:%.*]] = trunc i32 [[FACG]] to i16
428 // CHECK: ret i16 [[RET]]
429 uint16_t test_vcagth_f16(float16_t a, float16_t b) {
430 return vcagth_f16(a, b);
433 // CHECK-LABEL: test_vcaleh_f16
434 // CHECK: [[FACG:%.*]] = call i32 @llvm.aarch64.neon.facge.i32.f16(half %b, half %a)
435 // CHECK: [[RET:%.*]] = trunc i32 [[FACG]] to i16
436 // CHECK: ret i16 [[RET]]
437 uint16_t test_vcaleh_f16(float16_t a, float16_t b) {
438 return vcaleh_f16(a, b);
441 // CHECK-LABEL: test_vcalth_f16
442 // CHECK: [[FACG:%.*]] = call i32 @llvm.aarch64.neon.facgt.i32.f16(half %b, half %a)
443 // CHECK: [[RET:%.*]] = trunc i32 [[FACG]] to i16
444 // CHECK: ret i16 [[RET]]
445 uint16_t test_vcalth_f16(float16_t a, float16_t b) {
446 return vcalth_f16(a, b);
449 // CHECK-LABEL: test_vceqh_f16
450 // CHECK: [[TMP1:%.*]] = fcmp oeq half %a, %b
451 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
452 // CHECK: ret i16 [[TMP2]]
453 uint16_t test_vceqh_f16(float16_t a, float16_t b) {
454 return vceqh_f16(a, b);
457 // CHECK-LABEL: test_vcgeh_f16
458 // CHECK: [[TMP1:%.*]] = fcmp oge half %a, %b
459 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
460 // CHECK: ret i16 [[TMP2]]
461 uint16_t test_vcgeh_f16(float16_t a, float16_t b) {
462 return vcgeh_f16(a, b);
465 // CHECK-LABEL: test_vcgth_f16
466 //CHECK: [[TMP1:%.*]] = fcmp ogt half %a, %b
467 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
468 // CHECK: ret i16 [[TMP2]]
469 uint16_t test_vcgth_f16(float16_t a, float16_t b) {
470 return vcgth_f16(a, b);
473 // CHECK-LABEL: test_vcleh_f16
474 // CHECK: [[TMP1:%.*]] = fcmp ole half %a, %b
475 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
476 // CHECK: ret i16 [[TMP2]]
477 uint16_t test_vcleh_f16(float16_t a, float16_t b) {
478 return vcleh_f16(a, b);
481 // CHECK-LABEL: test_vclth_f16
482 // CHECK: [[TMP1:%.*]] = fcmp olt half %a, %b
483 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
484 // CHECK: ret i16 [[TMP2]]
485 uint16_t test_vclth_f16(float16_t a, float16_t b) {
486 return vclth_f16(a, b);
489 // CHECK-LABEL: test_vcvth_n_f16_s16
490 // CHECK: [[SEXT:%.*]] = sext i16 %a to i32
491 // CHECK: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i32(i32 [[SEXT]], i32 1)
492 // CHECK: ret half [[CVT]]
493 float16_t test_vcvth_n_f16_s16(int16_t a) {
494 return vcvth_n_f16_s16(a, 1);
497 // CHECK-LABEL: test_vcvth_n_f16_s32
498 // CHECK: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i32(i32 %a, i32 1)
499 // CHECK: ret half [[CVT]]
500 float16_t test_vcvth_n_f16_s32(int32_t a) {
501 return vcvth_n_f16_s32(a, 1);
504 // CHECK-LABEL: test_vcvth_n_f16_s64
505 // CHECK: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i64(i64 %a, i32 1)
506 // CHECK: ret half [[CVT]]
507 float16_t test_vcvth_n_f16_s64(int64_t a) {
508 return vcvth_n_f16_s64(a, 1);
511 // CHECK-LABEL: test_vcvth_n_s16_f16
512 // CHECK: [[CVT:%.*]] = call i32 @llvm.aarch64.neon.vcvtfp2fxs.i32.f16(half %a, i32 1)
513 // CHECK: [[RET:%.*]] = trunc i32 [[CVT]] to i16
514 // CHECK: ret i16 [[RET]]
515 int16_t test_vcvth_n_s16_f16(float16_t a) {
516 return vcvth_n_s16_f16(a, 1);
519 // CHECK-LABEL: test_vcvth_n_s32_f16
520 // CHECK: [[CVT:%.*]] = call i32 @llvm.aarch64.neon.vcvtfp2fxs.i32.f16(half %a, i32 1)
521 // CHECK: ret i32 [[CVT]]
522 int32_t test_vcvth_n_s32_f16(float16_t a) {
523 return vcvth_n_s32_f16(a, 1);
526 // CHECK-LABEL: test_vcvth_n_s64_f16
527 // CHECK: [[CVT:%.*]] = call i64 @llvm.aarch64.neon.vcvtfp2fxs.i64.f16(half %a, i32 1)
528 // CHECK: ret i64 [[CVT]]
529 int64_t test_vcvth_n_s64_f16(float16_t a) {
530 return vcvth_n_s64_f16(a, 1);
533 // CHECK-LABEL: test_vcvth_n_f16_u16
534 // CHECK: [[SEXT:%.*]] = zext i16 %a to i32
535 // CHECK: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxu2fp.f16.i32(i32 [[SEXT]], i32 1)
536 // CHECK: ret half [[CVT]]
537 float16_t test_vcvth_n_f16_u16(int16_t a) {
538 return vcvth_n_f16_u16(a, 1);
541 // CHECK-LABEL: test_vcvth_n_f16_u32
542 // CHECK: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxu2fp.f16.i32(i32 %a, i32 1)
543 // CHECK: ret half [[CVT]]
544 float16_t test_vcvth_n_f16_u32(int32_t a) {
545 return vcvth_n_f16_u32(a, 1);
548 // CHECK-LABEL: test_vcvth_n_f16_u64
549 // CHECK: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxu2fp.f16.i64(i64 %a, i32 1)
550 // CHECK: ret half [[CVT]]
551 float16_t test_vcvth_n_f16_u64(int64_t a) {
552 return vcvth_n_f16_u64(a, 1);
555 // CHECK-LABEL: test_vcvth_n_u16_f16
556 // CHECK: [[CVT:%.*]] = call i32 @llvm.aarch64.neon.vcvtfp2fxu.i32.f16(half %a, i32 1)
557 // CHECK: [[RET:%.*]] = trunc i32 [[CVT]] to i16
558 // CHECK: ret i16 [[RET]]
559 int16_t test_vcvth_n_u16_f16(float16_t a) {
560 return vcvth_n_u16_f16(a, 1);
563 // CHECK-LABEL: test_vcvth_n_u32_f16
564 // CHECK: [[CVT:%.*]] = call i32 @llvm.aarch64.neon.vcvtfp2fxu.i32.f16(half %a, i32 1)
565 // CHECK: ret i32 [[CVT]]
566 int32_t test_vcvth_n_u32_f16(float16_t a) {
567 return vcvth_n_u32_f16(a, 1);
570 // CHECK-LABEL: test_vcvth_n_u64_f16
571 // CHECK: [[CVT:%.*]] = call i64 @llvm.aarch64.neon.vcvtfp2fxu.i64.f16(half %a, i32 1)
572 // CHECK: ret i64 [[CVT]]
573 int64_t test_vcvth_n_u64_f16(float16_t a) {
574 return vcvth_n_u64_f16(a, 1);
577 // CHECK-LABEL: test_vdivh_f16
578 // CHECK: [[DIV:%.*]] = fdiv half %a, %b
579 // CHECK: ret half [[DIV]]
580 float16_t test_vdivh_f16(float16_t a, float16_t b) {
581 return vdivh_f16(a, b);
584 // CHECK-LABEL: test_vmaxh_f16
585 // CHECK: [[MAX:%.*]] = call half @llvm.aarch64.neon.fmax.f16(half %a, half %b)
586 // CHECK: ret half [[MAX]]
587 float16_t test_vmaxh_f16(float16_t a, float16_t b) {
588 return vmaxh_f16(a, b);
591 // CHECK-LABEL: test_vmaxnmh_f16
592 // CHECK: [[MAX:%.*]] = call half @llvm.aarch64.neon.fmaxnm.f16(half %a, half %b)
593 // CHECK: ret half [[MAX]]
594 float16_t test_vmaxnmh_f16(float16_t a, float16_t b) {
595 return vmaxnmh_f16(a, b);
598 // CHECK-LABEL: test_vminh_f16
599 // CHECK: [[MIN:%.*]] = call half @llvm.aarch64.neon.fmin.f16(half %a, half %b)
600 // CHECK: ret half [[MIN]]
601 float16_t test_vminh_f16(float16_t a, float16_t b) {
602 return vminh_f16(a, b);
605 // CHECK-LABEL: test_vminnmh_f16
606 // CHECK: [[MIN:%.*]] = call half @llvm.aarch64.neon.fminnm.f16(half %a, half %b)
607 // CHECK: ret half [[MIN]]
608 float16_t test_vminnmh_f16(float16_t a, float16_t b) {
609 return vminnmh_f16(a, b);
612 // CHECK-LABEL: test_vmulh_f16
613 // CHECK: [[MUL:%.*]] = fmul half %a, %b
614 // CHECK: ret half [[MUL]]
615 float16_t test_vmulh_f16(float16_t a, float16_t b) {
616 return vmulh_f16(a, b);
619 // CHECK-LABEL: test_vmulxh_f16
620 // CHECK: [[MUL:%.*]] = call half @llvm.aarch64.neon.fmulx.f16(half %a, half %b)
621 // CHECK: ret half [[MUL]]
622 float16_t test_vmulxh_f16(float16_t a, float16_t b) {
623 return vmulxh_f16(a, b);
626 // CHECK-LABEL: test_vrecpsh_f16
627 // CHECK: [[RECPS:%.*]] = call half @llvm.aarch64.neon.frecps.f16(half %a, half %b)
628 // CHECK: ret half [[RECPS]]
629 float16_t test_vrecpsh_f16(float16_t a, float16_t b) {
630 return vrecpsh_f16(a, b);
633 // CHECK-LABEL: test_vrsqrtsh_f16
634 // CHECK: [[RSQRTS:%.*]] = call half @llvm.aarch64.neon.frsqrts.f16(half %a, half %b)
635 // CHECK: ret half [[RSQRTS]]
636 float16_t test_vrsqrtsh_f16(float16_t a, float16_t b) {
637 return vrsqrtsh_f16(a, b);
640 // CHECK-LABEL: test_vsubh_f16
641 // CHECK: [[SUB:%.*]] = fsub half %a, %b
642 // CHECK: ret half [[SUB]]
643 float16_t test_vsubh_f16(float16_t a, float16_t b) {
644 return vsubh_f16(a, b);
647 // CHECK-LABEL: test_vfmah_f16
648 // CHECK: [[FMA:%.*]] = call half @llvm.fma.f16(half %b, half %c, half %a)
649 // CHECK: ret half [[FMA]]
650 float16_t test_vfmah_f16(float16_t a, float16_t b, float16_t c) {
651 return vfmah_f16(a, b, c);
654 // CHECK-LABEL: test_vfmsh_f16
655 // CHECK: [[SUB:%.*]] = fneg half %b
656 // CHECK: [[ADD:%.*]] = call half @llvm.fma.f16(half [[SUB]], half %c, half %a)
657 // CHECK: ret half [[ADD]]
658 float16_t test_vfmsh_f16(float16_t a, float16_t b, float16_t c) {
659 return vfmsh_f16(a, b, c);