// Scrape provenance (not part of the test): taken from llvm-project.git,
// commit headline: Revert "[llvm] Improve llvm.objectsize computation by computing GEP, alloca and malloc..."
// Path: clang/test/CodeGen/arm-cde-vfp.c (blob 0e219fc76ce75c412e42b3ebe915c7fc82613c12)
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi \
// RUN:   -target-feature +cdecp0 -target-feature +cdecp1 \
// RUN:   -mfloat-abi hard -O0 -disable-O0-optnone \
// RUN:   -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_cde.h>
11 // CHECK-LABEL: @test_vcx1_u32(
12 // CHECK-NEXT: entry:
13 // CHECK-NEXT: [[TMP0:%.*]] = call float @llvm.arm.cde.vcx1.f32(i32 0, i32 11)
14 // CHECK-NEXT: [[TMP1:%.*]] = bitcast float [[TMP0]] to i32
15 // CHECK-NEXT: ret i32 [[TMP1]]
17 uint32_t test_vcx1_u32(void) {
18 return __arm_vcx1_u32(0, 11);
21 // CHECK-LABEL: @test_vcx1a_u32(
22 // CHECK-NEXT: entry:
23 // CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 [[ACC:%.*]] to float
24 // CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.arm.cde.vcx1a.f32(i32 1, float [[TMP0]], i32 12)
25 // CHECK-NEXT: [[TMP2:%.*]] = bitcast float [[TMP1]] to i32
26 // CHECK-NEXT: ret i32 [[TMP2]]
28 uint32_t test_vcx1a_u32(uint32_t acc) {
29 return __arm_vcx1a_u32(1, acc, 12);
32 // CHECK-LABEL: @test_vcx2_u32(
33 // CHECK-NEXT: entry:
34 // CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 [[N:%.*]] to float
35 // CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.arm.cde.vcx2.f32(i32 0, float [[TMP0]], i32 21)
36 // CHECK-NEXT: [[TMP2:%.*]] = bitcast float [[TMP1]] to i32
37 // CHECK-NEXT: ret i32 [[TMP2]]
39 uint32_t test_vcx2_u32(uint32_t n) {
40 return __arm_vcx2_u32(0, n, 21);
43 // CHECK-LABEL: @test_vcx2a_u32(
44 // CHECK-NEXT: entry:
45 // CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 [[ACC:%.*]] to float
46 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[N:%.*]] to float
47 // CHECK-NEXT: [[TMP2:%.*]] = call float @llvm.arm.cde.vcx2a.f32(i32 0, float [[TMP0]], float [[TMP1]], i32 22)
48 // CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[TMP2]] to i32
49 // CHECK-NEXT: ret i32 [[TMP3]]
51 uint32_t test_vcx2a_u32(uint32_t acc, uint32_t n) {
52 return __arm_vcx2a_u32(0, acc, n, 22);
55 // CHECK-LABEL: @test_vcx3_u32(
56 // CHECK-NEXT: entry:
57 // CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 [[N:%.*]] to float
58 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[M:%.*]] to float
59 // CHECK-NEXT: [[TMP2:%.*]] = call float @llvm.arm.cde.vcx3.f32(i32 1, float [[TMP0]], float [[TMP1]], i32 3)
60 // CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[TMP2]] to i32
61 // CHECK-NEXT: ret i32 [[TMP3]]
63 uint32_t test_vcx3_u32(uint32_t n, uint32_t m) {
64 return __arm_vcx3_u32(1, n, m, 3);
67 // CHECK-LABEL: @test_vcx3a_u32(
68 // CHECK-NEXT: entry:
69 // CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 [[ACC:%.*]] to float
70 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[N:%.*]] to float
71 // CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[M:%.*]] to float
72 // CHECK-NEXT: [[TMP3:%.*]] = call float @llvm.arm.cde.vcx3a.f32(i32 0, float [[TMP0]], float [[TMP1]], float [[TMP2]], i32 5)
73 // CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[TMP3]] to i32
74 // CHECK-NEXT: ret i32 [[TMP4]]
76 uint32_t test_vcx3a_u32(uint32_t acc, uint32_t n, uint32_t m) {
77 return __arm_vcx3a_u32(0, acc, n, m, 5);
80 // CHECK-LABEL: @test_vcx1d_u64(
81 // CHECK-NEXT: entry:
82 // CHECK-NEXT: [[TMP0:%.*]] = call double @llvm.arm.cde.vcx1.f64(i32 0, i32 11)
83 // CHECK-NEXT: [[TMP1:%.*]] = bitcast double [[TMP0]] to i64
84 // CHECK-NEXT: ret i64 [[TMP1]]
86 uint64_t test_vcx1d_u64(void) {
87 return __arm_vcx1d_u64(0, 11);
90 // CHECK-LABEL: @test_vcx1da_u64(
91 // CHECK-NEXT: entry:
92 // CHECK-NEXT: [[TMP0:%.*]] = bitcast i64 [[ACC:%.*]] to double
93 // CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.arm.cde.vcx1a.f64(i32 1, double [[TMP0]], i32 12)
94 // CHECK-NEXT: [[TMP2:%.*]] = bitcast double [[TMP1]] to i64
95 // CHECK-NEXT: ret i64 [[TMP2]]
97 uint64_t test_vcx1da_u64(uint64_t acc) {
98 return __arm_vcx1da_u64(1, acc, 12);
101 // CHECK-LABEL: @test_vcx2d_u64(
102 // CHECK-NEXT: entry:
103 // CHECK-NEXT: [[TMP0:%.*]] = bitcast i64 [[N:%.*]] to double
104 // CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.arm.cde.vcx2.f64(i32 0, double [[TMP0]], i32 21)
105 // CHECK-NEXT: [[TMP2:%.*]] = bitcast double [[TMP1]] to i64
106 // CHECK-NEXT: ret i64 [[TMP2]]
108 uint64_t test_vcx2d_u64(uint64_t n) {
109 return __arm_vcx2d_u64(0, n, 21);
112 // CHECK-LABEL: @test_vcx2da_u64(
113 // CHECK-NEXT: entry:
114 // CHECK-NEXT: [[TMP0:%.*]] = bitcast i64 [[ACC:%.*]] to double
115 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[N:%.*]] to double
116 // CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.arm.cde.vcx2a.f64(i32 0, double [[TMP0]], double [[TMP1]], i32 22)
117 // CHECK-NEXT: [[TMP3:%.*]] = bitcast double [[TMP2]] to i64
118 // CHECK-NEXT: ret i64 [[TMP3]]
120 uint64_t test_vcx2da_u64(uint64_t acc, uint64_t n) {
121 return __arm_vcx2da_u64(0, acc, n, 22);
124 // CHECK-LABEL: @test_vcx3d_u64(
125 // CHECK-NEXT: entry:
126 // CHECK-NEXT: [[TMP0:%.*]] = bitcast i64 [[N:%.*]] to double
127 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[M:%.*]] to double
128 // CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.arm.cde.vcx3.f64(i32 1, double [[TMP0]], double [[TMP1]], i32 3)
129 // CHECK-NEXT: [[TMP3:%.*]] = bitcast double [[TMP2]] to i64
130 // CHECK-NEXT: ret i64 [[TMP3]]
132 uint64_t test_vcx3d_u64(uint64_t n, uint64_t m) {
133 return __arm_vcx3d_u64(1, n, m, 3);
136 // CHECK-LABEL: @test_vcx3da_u64(
137 // CHECK-NEXT: entry:
138 // CHECK-NEXT: [[TMP0:%.*]] = bitcast i64 [[ACC:%.*]] to double
139 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[N:%.*]] to double
140 // CHECK-NEXT: [[TMP2:%.*]] = bitcast i64 [[M:%.*]] to double
141 // CHECK-NEXT: [[TMP3:%.*]] = call double @llvm.arm.cde.vcx3a.f64(i32 0, double [[TMP0]], double [[TMP1]], double [[TMP2]], i32 5)
142 // CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[TMP3]] to i64
143 // CHECK-NEXT: ret i64 [[TMP4]]
145 uint64_t test_vcx3da_u64(uint64_t acc, uint64_t n, uint64_t m) {
146 return __arm_vcx3da_u64(0, acc, n, m, 5);