Revert "[llvm] Improve llvm.objectsize computation by computing GEP, alloca and mallo...
[llvm-project.git] / clang / test / CodeGen / arm64-abi-vector.c
blob81e42315c883bdbfc63e137495fcdaea0b1eb6b8
// RUN: %clang_cc1 -triple arm64-apple-ios7 -target-abi darwinpcs -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-linux-android -emit-llvm -o - %s | FileCheck -check-prefix=ANDROID %s

#include <stdarg.h>

// Vector typedefs under test: char/short/int/double elements in sizes that
// exercise the legal (<= 16 byte) and indirect (> 16 byte) ABI paths.
typedef __attribute__(( ext_vector_type(2) )) char __char2;
typedef __attribute__(( ext_vector_type(3) )) char __char3;
typedef __attribute__(( ext_vector_type(4) )) char __char4;
typedef __attribute__(( ext_vector_type(5) )) char __char5;
typedef __attribute__(( ext_vector_type(9) )) char __char9;
typedef __attribute__(( ext_vector_type(19) )) char __char19;
typedef __attribute__(( ext_vector_type(3) )) short __short3;
typedef __attribute__(( ext_vector_type(5) )) short __short5;
typedef __attribute__(( ext_vector_type(3) )) int __int3;
typedef __attribute__(( ext_vector_type(5) )) int __int5;
typedef __attribute__(( ext_vector_type(3) )) double __double3;
18 // Passing legal vector types as varargs. Check that we've allocated the appropriate size
19 double varargs_vec_2c(int fixed, ...) {
20 // ANDROID: varargs_vec_2c
21 // ANDROID: [[VAR:%.*]] = alloca <2 x i8>, align 2
22 // ANDROID: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8
23 va_list ap;
24 double sum = fixed;
25 va_start(ap, fixed);
26 __char2 c3 = va_arg(ap, __char2);
27 sum = sum + c3.x + c3.y;
28 va_end(ap);
29 return sum;
32 double test_2c(__char2 *in) {
33 // ANDROID: call double (i32, ...) @varargs_vec_2c(i32 noundef 3, i16 noundef {{%.*}})
34 return varargs_vec_2c(3, *in);
37 double varargs_vec_3c(int fixed, ...) {
38 // CHECK: varargs_vec_3c
39 // CHECK: alloca <3 x i8>, align 4
40 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8
41 va_list ap;
42 double sum = fixed;
43 va_start(ap, fixed);
44 __char3 c3 = va_arg(ap, __char3);
45 sum = sum + c3.x + c3.y;
46 va_end(ap);
47 return sum;
50 double test_3c(__char3 *in) {
51 // CHECK: test_3c
52 // CHECK: call double (i32, ...) @varargs_vec_3c(i32 noundef 3, i32 {{%.*}})
53 return varargs_vec_3c(3, *in);
56 double varargs_vec_4c(int fixed, ...) {
57 // CHECK: varargs_vec_4c
58 // CHECK: alloca <4 x i8>, align 4
59 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8
60 va_list ap;
61 double sum = fixed;
62 va_start(ap, fixed);
63 __char4 c4 = va_arg(ap, __char4);
64 sum = sum + c4.x + c4.y;
65 va_end(ap);
66 return sum;
69 double test_4c(__char4 *in) {
70 // CHECK: test_4c
71 // CHECK: call double (i32, ...) @varargs_vec_4c(i32 noundef 4, i32 noundef {{%.*}})
72 return varargs_vec_4c(4, *in);
75 double varargs_vec_5c(int fixed, ...) {
76 // CHECK: varargs_vec_5c
77 // CHECK: alloca <5 x i8>, align 8
78 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8
79 va_list ap;
80 double sum = fixed;
81 va_start(ap, fixed);
82 __char5 c5 = va_arg(ap, __char5);
83 sum = sum + c5.x + c5.y;
84 va_end(ap);
85 return sum;
88 double test_5c(__char5 *in) {
89 // CHECK: test_5c
90 // CHECK: call double (i32, ...) @varargs_vec_5c(i32 noundef 5, <2 x i32> {{%.*}})
91 return varargs_vec_5c(5, *in);
94 double varargs_vec_9c(int fixed, ...) {
95 // CHECK: varargs_vec_9c
96 // CHECK: alloca <9 x i8>, align 16
97 // CHECK: [[AP:%.*]] = load ptr, ptr %ap, align 8
98 // CHECK: [[AP_ADD:%.*]] = getelementptr inbounds i8, ptr [[AP]], i32 15
99 // CHECK: [[AP_ALIGN:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[AP_ADD]], i64 -16)
100 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i64 16
101 va_list ap;
102 double sum = fixed;
103 va_start(ap, fixed);
104 __char9 c9 = va_arg(ap, __char9);
105 sum = sum + c9.x + c9.y;
106 va_end(ap);
107 return sum;
110 double test_9c(__char9 *in) {
111 // CHECK: test_9c
112 // CHECK: call double (i32, ...) @varargs_vec_9c(i32 noundef 9, <4 x i32> {{%.*}})
113 return varargs_vec_9c(9, *in);
116 double varargs_vec_19c(int fixed, ...) {
117 // CHECK: varargs_vec_19c
118 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8
119 // CHECK: [[VAR2:%.*]] = load ptr, ptr [[AP_CUR]]
120 va_list ap;
121 double sum = fixed;
122 va_start(ap, fixed);
123 __char19 c19 = va_arg(ap, __char19);
124 sum = sum + c19.x + c19.y;
125 va_end(ap);
126 return sum;
129 double test_19c(__char19 *in) {
130 // CHECK: test_19c
131 // CHECK: call double (i32, ...) @varargs_vec_19c(i32 noundef 19, ptr noundef {{%.*}})
132 return varargs_vec_19c(19, *in);
135 double varargs_vec_3s(int fixed, ...) {
136 // CHECK: varargs_vec_3s
137 // CHECK: alloca <3 x i16>, align 8
138 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8
139 va_list ap;
140 double sum = fixed;
141 va_start(ap, fixed);
142 __short3 c3 = va_arg(ap, __short3);
143 sum = sum + c3.x + c3.y;
144 va_end(ap);
145 return sum;
148 double test_3s(__short3 *in) {
149 // CHECK: test_3s
150 // CHECK: call double (i32, ...) @varargs_vec_3s(i32 noundef 3, <2 x i32> {{%.*}})
151 return varargs_vec_3s(3, *in);
154 double varargs_vec_5s(int fixed, ...) {
155 // CHECK: varargs_vec_5s
156 // CHECK: alloca <5 x i16>, align 16
157 // CHECK: [[AP:%.*]] = load ptr, ptr %ap, align 8
158 // CHECK: [[AP_ADD:%.*]] = getelementptr inbounds i8, ptr [[AP]], i32 15
159 // CHECK: [[AP_ALIGN:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[AP_ADD]], i64 -16)
160 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i64 16
161 va_list ap;
162 double sum = fixed;
163 va_start(ap, fixed);
164 __short5 c5 = va_arg(ap, __short5);
165 sum = sum + c5.x + c5.y;
166 va_end(ap);
167 return sum;
170 double test_5s(__short5 *in) {
171 // CHECK: test_5s
172 // CHECK: call double (i32, ...) @varargs_vec_5s(i32 noundef 5, <4 x i32> {{%.*}})
173 return varargs_vec_5s(5, *in);
176 double varargs_vec_3i(int fixed, ...) {
177 // CHECK: varargs_vec_3i
178 // CHECK: alloca <3 x i32>, align 16
179 // CHECK: [[AP:%.*]] = load ptr, ptr %ap, align 8
180 // CHECK: [[AP_ADD:%.*]] = getelementptr inbounds i8, ptr [[AP]], i32 15
181 // CHECK: [[AP_ALIGN:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[AP_ADD]], i64 -16)
182 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i64 16
183 va_list ap;
184 double sum = fixed;
185 va_start(ap, fixed);
186 __int3 c3 = va_arg(ap, __int3);
187 sum = sum + c3.x + c3.y;
188 va_end(ap);
189 return sum;
192 double test_3i(__int3 *in) {
193 // CHECK: test_3i
194 // CHECK: call double (i32, ...) @varargs_vec_3i(i32 noundef 3, <4 x i32> {{%.*}})
195 return varargs_vec_3i(3, *in);
198 double varargs_vec_5i(int fixed, ...) {
199 // CHECK: varargs_vec_5i
200 // CHECK: alloca <5 x i32>, align 16
201 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8
202 // CHECK: [[VAR2:%.*]] = load ptr, ptr [[AP_CUR]]
203 va_list ap;
204 double sum = fixed;
205 va_start(ap, fixed);
206 __int5 c5 = va_arg(ap, __int5);
207 sum = sum + c5.x + c5.y;
208 va_end(ap);
209 return sum;
212 double test_5i(__int5 *in) {
213 // CHECK: test_5i
214 // CHECK: call double (i32, ...) @varargs_vec_5i(i32 noundef 5, ptr noundef {{%.*}})
215 return varargs_vec_5i(5, *in);
218 double varargs_vec_3d(int fixed, ...) {
219 // CHECK: varargs_vec_3d
220 // CHECK: alloca <3 x double>, align 16
221 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8
222 // CHECK: [[VAR2:%.*]] = load ptr, ptr [[AP_CUR]]
223 va_list ap;
224 double sum = fixed;
225 va_start(ap, fixed);
226 __double3 c3 = va_arg(ap, __double3);
227 sum = sum + c3.x + c3.y;
228 va_end(ap);
229 return sum;
232 double test_3d(__double3 *in) {
233 // CHECK: test_3d
234 // CHECK: call double (i32, ...) @varargs_vec_3d(i32 noundef 3, ptr noundef {{%.*}})
235 return varargs_vec_3d(3, *in);
238 double varargs_vec(int fixed, ...) {
239 // CHECK: varargs_vec
240 va_list ap;
241 double sum = fixed;
242 va_start(ap, fixed);
243 __char3 c3 = va_arg(ap, __char3);
244 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8
245 sum = sum + c3.x + c3.y;
246 __char5 c5 = va_arg(ap, __char5);
247 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8
248 sum = sum + c5.x + c5.y;
249 __char9 c9 = va_arg(ap, __char9);
252 // CHECK: [[AP:%.*]] = load ptr, ptr %ap, align 8
253 // CHECK: [[AP_ADD:%.*]] = getelementptr inbounds i8, ptr [[AP]], i32 15
254 // CHECK: [[AP_ALIGN:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[AP_ADD]], i64 -16)
255 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i64 16
256 sum = sum + c9.x + c9.y;
257 __char19 c19 = va_arg(ap, __char19);
258 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8
259 // CHECK: [[VAR2:%.*]] = load ptr, ptr [[AP_CUR]]
260 sum = sum + c19.x + c19.y;
261 __short3 s3 = va_arg(ap, __short3);
262 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8
263 sum = sum + s3.x + s3.y;
264 __short5 s5 = va_arg(ap, __short5);
266 // CHECK: [[AP:%.*]] = load ptr, ptr %ap, align 8
267 // CHECK: [[AP_ADD:%.*]] = getelementptr inbounds i8, ptr [[AP]], i32 15
268 // CHECK: [[AP_ALIGN:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[AP_ADD]], i64 -16)
269 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i64 16
270 sum = sum + s5.x + s5.y;
271 __int3 i3 = va_arg(ap, __int3);
273 // CHECK: [[AP:%.*]] = load ptr, ptr %ap, align 8
274 // CHECK: [[AP_ADD:%.*]] = getelementptr inbounds i8, ptr [[AP]], i32 15
275 // CHECK: [[AP_ALIGN:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[AP_ADD]], i64 -16)
276 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_ALIGN]], i64 16
277 sum = sum + i3.x + i3.y;
278 __int5 i5 = va_arg(ap, __int5);
279 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8
280 // CHECK: [[VAR2:%.*]] = load ptr, ptr [[AP_CUR]]
281 sum = sum + i5.x + i5.y;
282 __double3 d3 = va_arg(ap, __double3);
283 // CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[AP_CUR:%.*]], i64 8
284 // CHECK: [[VAR2:%.*]] = load ptr, ptr [[AP_CUR]]
285 sum = sum + d3.x + d3.y;
286 va_end(ap);
287 return sum;
290 double test(__char3 *c3, __char5 *c5, __char9 *c9, __char19 *c19,
291 __short3 *s3, __short5 *s5, __int3 *i3, __int5 *i5,
292 __double3 *d3) {
293 double ret = varargs_vec(3, *c3, *c5, *c9, *c19, *s3, *s5, *i3, *i5, *d3);
294 // CHECK: call double (i32, ...) @varargs_vec(i32 noundef 3, i32 {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> {{%.*}}, ptr noundef {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> {{%.*}}, <4 x i32> {{%.*}}, ptr noundef {{%.*}}, ptr noundef {{%.*}})
295 return ret;
298 __attribute__((noinline)) double args_vec_3c(int fixed, __char3 c3) {
299 // CHECK: args_vec_3c
300 // CHECK: [[C3:%.*]] = alloca <3 x i8>, align 4
301 // CHECK: store i32 {{%.*}}, ptr [[C3]]
302 double sum = fixed;
303 sum = sum + c3.x + c3.y;
304 return sum;
307 double fixed_3c(__char3 *in) {
308 // CHECK: fixed_3c
309 // CHECK: call double @args_vec_3c(i32 noundef 3, i32 {{%.*}})
310 return args_vec_3c(3, *in);
313 __attribute__((noinline)) double args_vec_5c(int fixed, __char5 c5) {
314 // CHECK: args_vec_5c
315 // CHECK: [[C5:%.*]] = alloca <5 x i8>, align 8
316 // CHECK: store <2 x i32> {{%.*}}, ptr [[C5]], align 8
317 double sum = fixed;
318 sum = sum + c5.x + c5.y;
319 return sum;
322 double fixed_5c(__char5 *in) {
323 // CHECK: fixed_5c
324 // CHECK: call double @args_vec_5c(i32 noundef 5, <2 x i32> {{%.*}})
325 return args_vec_5c(5, *in);
328 __attribute__((noinline)) double args_vec_9c(int fixed, __char9 c9) {
329 // CHECK: args_vec_9c
330 // CHECK: [[C9:%.*]] = alloca <9 x i8>, align 16
331 // CHECK: store <4 x i32> {{%.*}}, ptr [[C9]], align 16
332 double sum = fixed;
333 sum = sum + c9.x + c9.y;
334 return sum;
337 double fixed_9c(__char9 *in) {
338 // CHECK: fixed_9c
339 // CHECK: call double @args_vec_9c(i32 noundef 9, <4 x i32> {{%.*}})
340 return args_vec_9c(9, *in);
343 __attribute__((noinline)) double args_vec_19c(int fixed, __char19 c19) {
344 // CHECK: args_vec_19c
345 // CHECK: [[C19:%.*]] = load <19 x i8>, ptr {{.*}}, align 16
346 double sum = fixed;
347 sum = sum + c19.x + c19.y;
348 return sum;
351 double fixed_19c(__char19 *in) {
352 // CHECK: fixed_19c
353 // CHECK: call double @args_vec_19c(i32 noundef 19, ptr noundef {{%.*}})
354 return args_vec_19c(19, *in);
357 __attribute__((noinline)) double args_vec_3s(int fixed, __short3 c3) {
358 // CHECK: args_vec_3s
359 // CHECK: [[C3:%.*]] = alloca <3 x i16>, align 8
360 // CHECK: store <2 x i32> {{%.*}}, ptr [[C3]], align 8
361 double sum = fixed;
362 sum = sum + c3.x + c3.y;
363 return sum;
366 double fixed_3s(__short3 *in) {
367 // CHECK: fixed_3s
368 // CHECK: call double @args_vec_3s(i32 noundef 3, <2 x i32> {{%.*}})
369 return args_vec_3s(3, *in);
372 __attribute__((noinline)) double args_vec_5s(int fixed, __short5 c5) {
373 // CHECK: args_vec_5s
374 // CHECK: [[C5:%.*]] = alloca <5 x i16>, align 16
375 // CHECK: store <4 x i32> {{%.*}}, ptr [[C5]], align 16
376 double sum = fixed;
377 sum = sum + c5.x + c5.y;
378 return sum;
381 double fixed_5s(__short5 *in) {
382 // CHECK: fixed_5s
383 // CHECK: call double @args_vec_5s(i32 noundef 5, <4 x i32> {{%.*}})
384 return args_vec_5s(5, *in);
387 __attribute__((noinline)) double args_vec_3i(int fixed, __int3 c3) {
388 // CHECK: args_vec_3i
389 // CHECK: [[C3:%.*]] = alloca <3 x i32>, align 16
390 // CHECK: store <4 x i32> {{%.*}}, ptr [[C3]], align 16
391 double sum = fixed;
392 sum = sum + c3.x + c3.y;
393 return sum;
396 double fixed_3i(__int3 *in) {
397 // CHECK: fixed_3i
398 // CHECK: call double @args_vec_3i(i32 noundef 3, <4 x i32> {{%.*}})
399 return args_vec_3i(3, *in);
402 __attribute__((noinline)) double args_vec_5i(int fixed, __int5 c5) {
403 // CHECK: args_vec_5i
404 // CHECK: [[C5:%.*]] = load <5 x i32>, ptr {{%.*}}, align 16
405 double sum = fixed;
406 sum = sum + c5.x + c5.y;
407 return sum;
410 double fixed_5i(__int5 *in) {
411 // CHECK: fixed_5i
412 // CHECK: call double @args_vec_5i(i32 noundef 5, ptr noundef {{%.*}})
413 return args_vec_5i(5, *in);
416 __attribute__((noinline)) double args_vec_3d(int fixed, __double3 c3) {
417 // CHECK: args_vec_3d
418 // CHECK: [[LOAD:%.*]] = load <4 x double>, ptr {{%.*}}
419 // CHECK: shufflevector <4 x double> [[LOAD]], <4 x double> poison, <3 x i32> <i32 0, i32 1, i32 2>
420 double sum = fixed;
421 sum = sum + c3.x + c3.y;
422 return sum;
425 double fixed_3d(__double3 *in) {
426 // CHECK: fixed_3d
427 // CHECK: call double @args_vec_3d(i32 noundef 3, ptr noundef {{%.*}})
428 return args_vec_3d(3, *in);