Revert "[llvm] Improve llvm.objectsize computation by computing GEP, alloca and mallo...
[llvm-project.git] / clang / test / CodeGen / builtins-reduction-math.c
blobe12fd729c84c0b7dc943c1da703174cea03c0f02
// RUN: %clang_cc1 -triple x86_64-apple-darwin %s -emit-llvm -disable-llvm-passes -o - | FileCheck %s

// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -O1 -triple aarch64 -target-feature +sve %s -emit-llvm -disable-llvm-passes -o - | FileCheck --check-prefixes=SVE %s
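
// This test checks that the __builtin_reduce_* builtins lower to the
// corresponding llvm.vector.reduce.* intrinsics, that the scalar result is
// implicitly widened (sext/zext/fpext) when assigned to a wider type, and
// that operands in a non-default address space are handled. The second RUN
// line additionally exercises SVE scalable vector operands.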

typedef float float4 __attribute__((ext_vector_type(4)));
typedef short int si8 __attribute__((ext_vector_type(8)));
typedef unsigned int u4 __attribute__((ext_vector_type(4)));

__attribute__((address_space(1))) float4 vf1_as_one;

void test_builtin_reduce_max(float4 vf1, si8 vi1, u4 vu1) {
  // CHECK-LABEL: define void @test_builtin_reduce_max(
  // CHECK: [[VF1:%.+]] = load <4 x float>, ptr %vf1.addr, align 16
  // CHECK-NEXT: call float @llvm.vector.reduce.fmax.v4f32(<4 x float> [[VF1]])
  float r1 = __builtin_reduce_max(vf1);

  // CHECK: [[VI1:%.+]] = load <8 x i16>, ptr %vi1.addr, align 16
  // CHECK-NEXT: call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> [[VI1]])
  short r2 = __builtin_reduce_max(vi1);

  // CHECK: [[VU1:%.+]] = load <4 x i32>, ptr %vu1.addr, align 16
  // CHECK-NEXT: call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[VU1]])
  unsigned r3 = __builtin_reduce_max(vu1);

  // CHECK: [[VF1_AS1:%.+]] = load <4 x float>, ptr addrspace(1) @vf1_as_one, align 16
  // CHECK-NEXT: [[RDX1:%.+]] = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> [[VF1_AS1]])
  // CHECK-NEXT: fpext float [[RDX1]] to double
  const double r4 = __builtin_reduce_max(vf1_as_one);

  // CHECK: [[CVI1:%.+]] = load <8 x i16>, ptr %cvi1, align 16
  // CHECK-NEXT: [[RDX2:%.+]] = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> [[CVI1]])
  // CHECK-NEXT: sext i16 [[RDX2]] to i64
  const si8 cvi1 = vi1;
  unsigned long long r5 = __builtin_reduce_max(cvi1);
}

void test_builtin_reduce_min(float4 vf1, si8 vi1, u4 vu1) {
  // CHECK-LABEL: define void @test_builtin_reduce_min(
  // CHECK: [[VF1:%.+]] = load <4 x float>, ptr %vf1.addr, align 16
  // CHECK-NEXT: call float @llvm.vector.reduce.fmin.v4f32(<4 x float> [[VF1]])
  float r1 = __builtin_reduce_min(vf1);

  // CHECK: [[VI1:%.+]] = load <8 x i16>, ptr %vi1.addr, align 16
  // CHECK-NEXT: call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> [[VI1]])
  short r2 = __builtin_reduce_min(vi1);

  // CHECK: [[VU1:%.+]] = load <4 x i32>, ptr %vu1.addr, align 16
  // CHECK-NEXT: call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[VU1]])
  unsigned r3 = __builtin_reduce_min(vu1);

  // CHECK: [[VF1_AS1:%.+]] = load <4 x float>, ptr addrspace(1) @vf1_as_one, align 16
  // CHECK-NEXT: [[RDX1:%.+]] = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> [[VF1_AS1]])
  // CHECK-NEXT: fpext float [[RDX1]] to double
  const double r4 = __builtin_reduce_min(vf1_as_one);

  // CHECK: [[CVI1:%.+]] = load <8 x i16>, ptr %cvi1, align 16
  // CHECK-NEXT: [[RDX2:%.+]] = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> [[CVI1]])
  // CHECK-NEXT: sext i16 [[RDX2]] to i64
  const si8 cvi1 = vi1;
  unsigned long long r5 = __builtin_reduce_min(cvi1);
}

void test_builtin_reduce_add(si8 vi1, u4 vu1) {
  // CHECK: [[VI1:%.+]] = load <8 x i16>, ptr %vi1.addr, align 16
  // CHECK-NEXT: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[VI1]])
  short r2 = __builtin_reduce_add(vi1);

  // CHECK: [[VU1:%.+]] = load <4 x i32>, ptr %vu1.addr, align 16
  // CHECK-NEXT: call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VU1]])
  unsigned r3 = __builtin_reduce_add(vu1);

  // CHECK: [[CVI1:%.+]] = load <8 x i16>, ptr %cvi1, align 16
  // CHECK-NEXT: [[RDX1:%.+]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[CVI1]])
  // CHECK-NEXT: sext i16 [[RDX1]] to i32
  const si8 cvi1 = vi1;
  int r4 = __builtin_reduce_add(cvi1);

  // CHECK: [[CVU1:%.+]] = load <4 x i32>, ptr %cvu1, align 16
  // CHECK-NEXT: [[RDX2:%.+]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[CVU1]])
  // CHECK-NEXT: zext i32 [[RDX2]] to i64
  const u4 cvu1 = vu1;
  unsigned long long r5 = __builtin_reduce_add(cvu1);
}

void test_builtin_reduce_mul(si8 vi1, u4 vu1) {
  // CHECK: [[VI1:%.+]] = load <8 x i16>, ptr %vi1.addr, align 16
  // CHECK-NEXT: call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> [[VI1]])
  short r2 = __builtin_reduce_mul(vi1);

  // CHECK: [[VU1:%.+]] = load <4 x i32>, ptr %vu1.addr, align 16
  // CHECK-NEXT: call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[VU1]])
  unsigned r3 = __builtin_reduce_mul(vu1);

  // CHECK: [[CVI1:%.+]] = load <8 x i16>, ptr %cvi1, align 16
  // CHECK-NEXT: [[RDX1:%.+]] = call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> [[CVI1]])
  // CHECK-NEXT: sext i16 [[RDX1]] to i32
  const si8 cvi1 = vi1;
  int r4 = __builtin_reduce_mul(cvi1);

  // CHECK: [[CVU1:%.+]] = load <4 x i32>, ptr %cvu1, align 16
  // CHECK-NEXT: [[RDX2:%.+]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[CVU1]])
  // CHECK-NEXT: zext i32 [[RDX2]] to i64
  const u4 cvu1 = vu1;
  unsigned long long r5 = __builtin_reduce_mul(cvu1);
}

void test_builtin_reduce_xor(si8 vi1, u4 vu1) {

  // CHECK: [[VI1:%.+]] = load <8 x i16>, ptr %vi1.addr, align 16
  // CHECK-NEXT: call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> [[VI1]])
  short r2 = __builtin_reduce_xor(vi1);

  // CHECK: [[VU1:%.+]] = load <4 x i32>, ptr %vu1.addr, align 16
  // CHECK-NEXT: call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[VU1]])
  unsigned r3 = __builtin_reduce_xor(vu1);
}

void test_builtin_reduce_or(si8 vi1, u4 vu1) {

  // CHECK: [[VI1:%.+]] = load <8 x i16>, ptr %vi1.addr, align 16
  // CHECK-NEXT: call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[VI1]])
  short r2 = __builtin_reduce_or(vi1);

  // CHECK: [[VU1:%.+]] = load <4 x i32>, ptr %vu1.addr, align 16
  // CHECK-NEXT: call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[VU1]])
  unsigned r3 = __builtin_reduce_or(vu1);
}

void test_builtin_reduce_and(si8 vi1, u4 vu1) {

  // CHECK: [[VI1:%.+]] = load <8 x i16>, ptr %vi1.addr, align 16
  // CHECK-NEXT: call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> [[VI1]])
  short r2 = __builtin_reduce_and(vi1);

  // CHECK: [[VU1:%.+]] = load <4 x i32>, ptr %vu1.addr, align 16
  // CHECK-NEXT: call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[VU1]])
  unsigned r3 = __builtin_reduce_and(vu1);
}

void test_builtin_reduce_maximum(float4 vf1) {
  // CHECK-LABEL: define void @test_builtin_reduce_maximum(
  // CHECK: [[VF1:%.+]] = load <4 x float>, ptr %vf1.addr, align 16
  // CHECK-NEXT: call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> [[VF1]])
  float r1 = __builtin_reduce_maximum(vf1);

  // CHECK: [[VF1_AS1:%.+]] = load <4 x float>, ptr addrspace(1) @vf1_as_one, align 16
  // CHECK-NEXT: [[RDX1:%.+]] = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> [[VF1_AS1]])
  // CHECK-NEXT: fpext float [[RDX1]] to double
  const double r4 = __builtin_reduce_maximum(vf1_as_one);
}

void test_builtin_reduce_minimum(float4 vf1) {
  // CHECK-LABEL: define void @test_builtin_reduce_minimum(
  // CHECK: [[VF1:%.+]] = load <4 x float>, ptr %vf1.addr, align 16
  // CHECK-NEXT: call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> [[VF1]])
  float r1 = __builtin_reduce_minimum(vf1);

  // CHECK: [[VF1_AS1:%.+]] = load <4 x float>, ptr addrspace(1) @vf1_as_one, align 16
  // CHECK-NEXT: [[RDX1:%.+]] = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> [[VF1_AS1]])
  // CHECK-NEXT: fpext float [[RDX1]] to double
  const double r4 = __builtin_reduce_minimum(vf1_as_one);
}
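
// The SVE-only block below verifies that the reduction builtins also accept
// scalable vector operands and lower to the same llvm.vector.reduce.*
// intrinsics on <vscale x N x ...> types; it is compiled only by the
// aarch64+sve RUN line above.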
#if defined(__ARM_FEATURE_SVE)
#include <arm_sve.h>

void test_builtin_reduce_SVE(int a, unsigned long long b, short c, float d) {
  // SVE-LABEL: void @test_builtin_reduce_SVE(

  svint32_t vec_a = svdup_s32(a);
  svuint64_t vec_b = svdup_u64(b);
  svint16_t vec_c1 = svdup_s16(c);
  svuint16_t vec_c2 = svdup_u16(c);
  svfloat32_t vec_d = svdup_f32(d);

  // SVE: [[VF1:%.+]] = load <vscale x 4 x i32>, ptr %vec_a
  // SVE-NEXT: call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[VF1]])
  int r1 = __builtin_reduce_add(vec_a);

  // SVE: [[VF2:%.+]] = load <vscale x 4 x i32>, ptr %vec_a
  // SVE-NEXT: call i32 @llvm.vector.reduce.mul.nxv4i32(<vscale x 4 x i32> [[VF2]])
  int r2 = __builtin_reduce_mul(vec_a);

  // SVE: [[VF3:%.+]] = load <vscale x 2 x i64>, ptr %vec_b
  // SVE-NEXT: call i64 @llvm.vector.reduce.xor.nxv2i64(<vscale x 2 x i64> [[VF3]])
  long long r3 = __builtin_reduce_xor(vec_b);

  // SVE: [[VF4:%.+]] = load <vscale x 2 x i64>, ptr %vec_b
  // SVE-NEXT: call i64 @llvm.vector.reduce.or.nxv2i64(<vscale x 2 x i64> [[VF4]])
  long long r4 = __builtin_reduce_or(vec_b);

  // SVE: [[VF5:%.+]] = load <vscale x 2 x i64>, ptr %vec_b
  // SVE-NEXT: call i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64> [[VF5]])
  long long r5 = __builtin_reduce_and(vec_b);

  // SVE: [[VF6:%.+]] = load <vscale x 8 x i16>, ptr %vec_c1
  // SVE-NEXT: call i16 @llvm.vector.reduce.smax.nxv8i16(<vscale x 8 x i16> [[VF6]])
  short r6 = __builtin_reduce_max(vec_c1);

  // SVE: [[VF7:%.+]] = load <vscale x 8 x i16>, ptr %vec_c2
  // SVE-NEXT: call i16 @llvm.vector.reduce.umin.nxv8i16(<vscale x 8 x i16> [[VF7]])
  unsigned short r7 = __builtin_reduce_min(vec_c2);

  // SVE: [[VF8:%.+]] = load <vscale x 4 x float>, ptr %vec_d
  // SVE-NEXT: call float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[VF8]])
  float r8 = __builtin_reduce_max(vec_d);

  // SVE: [[VF9:%.+]] = load <vscale x 4 x float>, ptr %vec_d
  // SVE-NEXT: call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[VF9]])
  float r9 = __builtin_reduce_min(vec_d);
}
#endif