Revert "[llvm] Improve llvm.objectsize computation by computing GEP, alloca and mallo...
[llvm-project.git] / clang / test / CodeGen / vector-alignment.c
blobc0b607e96c61866cd0e796f6e33e92ac5bcab0a7
// Checks the alignment clang assigns to vector-typed globals on x86 targets:
// natural (size-based) alignment, capped at the target's maximum vector
// alignment (16 bytes under SSE, 32 under AVX, 64 under AVX-512), with an
// explicit aligned attribute always taking precedence over both.
// NOTE(review): the leading number on each line below is a scrape artifact of
// the original file's line numbering, not part of the test source.
1 // RUN: %clang_cc1 -w -triple x86_64-apple-darwin10 \
2 // RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix=ALL --check-prefix=SSE
3 // RUN: %clang_cc1 -w -triple i386-apple-darwin10 \
4 // RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix=ALL --check-prefix=SSE
5 // RUN: %clang_cc1 -w -triple x86_64-apple-darwin10 -target-feature +avx \
6 // RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix=ALL --check-prefix=AVX
7 // RUN: %clang_cc1 -w -triple i386-apple-darwin10 -target-feature +avx \
8 // RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix=ALL --check-prefix=AVX
9 // RUN: %clang_cc1 -w -triple x86_64-apple-darwin10 -target-feature +avx512f \
10 // RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix=ALL --check-prefix=AVX512
11 // RUN: %clang_cc1 -w -triple i386-apple-darwin10 -target-feature +avx512f \
12 // RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix=ALL --check-prefix=AVX512
14 // At or below target max alignment with no aligned attribute should align based
15 // on the size of vector.
16 double __attribute__((vector_size(16))) v1;
17 // SSE: @v1 {{.*}}, align 16
18 // AVX: @v1 {{.*}}, align 16
19 // AVX512: @v1 {{.*}}, align 16
20 double __attribute__((vector_size(32))) v2;
21 // SSE: @v2 {{.*}}, align 16
22 // AVX: @v2 {{.*}}, align 32
23 // AVX512: @v2 {{.*}}, align 32
24 typedef __attribute__((__ext_vector_type__(16))) _Bool v2b_type;
25 v2b_type v2b;
26 // ALL: @v2b {{.*}}, align 2
28 // Alignment above target max alignment with no aligned attribute should align
29 // based on the target max.
30 double __attribute__((vector_size(64))) v3;
31 // SSE: @v3 {{.*}}, align 16
32 // AVX: @v3 {{.*}}, align 32
33 // AVX512: @v3 {{.*}}, align 64
34 double __attribute__((vector_size(1024))) v4;
35 // SSE: @v4 {{.*}}, align 16
36 // AVX: @v4 {{.*}}, align 32
37 // AVX512: @v4 {{.*}}, align 64
38 typedef __attribute__((__ext_vector_type__(8192))) _Bool v4b_type;
39 v4b_type v4b;
40 // SSE: @v4b {{.*}}, align 16
41 // AVX: @v4b {{.*}}, align 32
42 // AVX512: @v4b {{.*}}, align 64
44 // Aligned attribute should always override.
45 double __attribute__((vector_size(16), aligned(16))) v5;
46 // ALL: @v5 {{.*}}, align 16
47 double __attribute__((vector_size(16), aligned(64))) v6;
48 // ALL: @v6 {{.*}}, align 64
49 double __attribute__((vector_size(32), aligned(16))) v7;
50 // ALL: @v7 {{.*}}, align 16
51 double __attribute__((vector_size(32), aligned(64))) v8;
52 // ALL: @v8 {{.*}}, align 64
53 typedef __attribute__((ext_vector_type(256), aligned(128))) _Bool v8b_type;
54 v8b_type v8b;
55 // ALL: @v8b {{.*}}, align 128
57 // Check non-power of 2 widths.
58 double __attribute__((vector_size(24))) v9;
59 // SSE: @v9 {{.*}}, align 16
60 // AVX: @v9 {{.*}}, align 32
61 // AVX512: @v9 {{.*}}, align 32
62 double __attribute__((vector_size(40))) v10;
63 // SSE: @v10 {{.*}}, align 16
64 // AVX: @v10 {{.*}}, align 32
65 // AVX512: @v10 {{.*}}, align 64
66 typedef __attribute__((ext_vector_type(248))) _Bool v10b_type;
67 v10b_type v10b;
68 // SSE: @v10b {{.*}}, align 16
69 // AVX: @v10b {{.*}}, align 32
70 // AVX512: @v10b {{.*}}, align 32
72 // Check non-power of 2 widths with aligned attribute.
73 double __attribute__((vector_size(24), aligned(64))) v11;
74 // ALL: @v11 {{.*}}, align 64
75 double __attribute__((vector_size(80), aligned(16))) v12;
76 // ALL: @v12 {{.*}}, align 16
77 typedef __attribute__((ext_vector_type(248), aligned(4))) _Bool v12b_type;
78 v12b_type v12b;
79 // ALL: @v12b {{.*}}, align 4