// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
// RUN: -disable-O0-optnone -emit-llvm -o - %s \
// RUN: | opt -S -passes=mem2reg | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
// RUN: -ffp-exception-behavior=strict \
// RUN: -disable-O0-optnone -emit-llvm -o - %s \
// RUN: | opt -S -passes=mem2reg | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
// RUN: -disable-O0-optnone -S -o - %s \
// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
// RUN: -ffp-exception-behavior=strict \
// RUN: -disable-O0-optnone -S -o - %s \
// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s

// REQUIRES: aarch64-registered-target

// Test new AArch64 intrinsics and types, both with and without constrained
// floating-point semantics.

#include <arm_neon.h>
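
// Check-prefix scheme: COMMON applies to all four RUN lines, COMMONIR to both
// IR runs, UNCONSTRAINED to the default IR run, CONSTRAINED to the
// -ffp-exception-behavior=strict IR run, and CHECK-ASM to the assembly runs.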

// COMMON-LABEL: test_vrndaq_f64
// COMMONIR: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// UNCONSTRAINED: [[VRNDA1_I:%.*]] = call <2 x double> @llvm.round.v2f64(<2 x double> %a)
// CONSTRAINED: [[VRNDA1_I:%.*]] = call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %a, metadata !"fpexcept.strict")
// CHECK-ASM: frinta v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
// COMMONIR: ret <2 x double> [[VRNDA1_I]]
float64x2_t test_vrndaq_f64(float64x2_t a) {
  return vrndaq_f64(a);
}
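// Note: vrndaq_f64 rounds to nearest with ties away from zero (llvm.round /
// FRINTA); the constrained round intrinsic carries only an exception-behavior
// operand because the operation does not depend on the dynamic rounding mode.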

// COMMON-LABEL: test_vrndpq_f64
// COMMONIR: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// UNCONSTRAINED: [[VRNDP1_I:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> %a)
// CONSTRAINED: [[VRNDP1_I:%.*]] = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %a, metadata !"fpexcept.strict")
// CHECK-ASM: frintp v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
// COMMONIR: ret <2 x double> [[VRNDP1_I]]
float64x2_t test_vrndpq_f64(float64x2_t a) {
  return vrndpq_f64(a);
}
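// Note: vrndpq_f64 rounds toward +infinity (llvm.ceil / FRINTP); like round,
// the constrained ceil intrinsic takes only the fpexcept metadata operand.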

// COMMON-LABEL: test_vsqrtq_f32
// COMMONIR: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// UNCONSTRAINED: [[VSQRT_I:%.*]] = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %a)
// CONSTRAINED: [[VSQRT_I:%.*]] = call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK-ASM: fsqrt v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
// COMMONIR: ret <4 x float> [[VSQRT_I]]
float32x4_t test_vsqrtq_f32(float32x4_t a) {
  return vsqrtq_f32(a);
}
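// Note: unlike round and ceil, sqrt is rounding-mode dependent, so the
// constrained intrinsic in this test and in the f64 test below carries a
// !"round.tonearest" operand in addition to !"fpexcept.strict"; both lower
// to FSQRT.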

// COMMON-LABEL: test_vsqrtq_f64
// COMMONIR: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// UNCONSTRAINED: [[VSQRT_I:%.*]] = call <2 x double> @llvm.sqrt.v2f64(<2 x double> %a)
// CONSTRAINED: [[VSQRT_I:%.*]] = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK-ASM: fsqrt v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
// COMMONIR: ret <2 x double> [[VSQRT_I]]
float64x2_t test_vsqrtq_f64(float64x2_t a) {
  return vsqrtq_f64(a);
}