Revert "[llvm] Improve llvm.objectsize computation by computing GEP, alloca and malloc parameters bound" (commit subject, truncated by the page scrape)
[llvm-project.git] / clang / test / CodeGen / ext-int.c
blob aebacd6f22ffc4a524d6b4e11a54b7afc12937c3
1 // RUN: %clang_cc1 -std=c23 -triple x86_64-gnu-linux -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK64,LIN64
2 // RUN: %clang_cc1 -std=c23 -triple x86_64-windows-pc -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK64,WIN64
3 // RUN: %clang_cc1 -std=c23 -triple i386-gnu-linux -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=CHECK,LIN32
4 // RUN: %clang_cc1 -std=c23 -triple i386-windows-pc -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=CHECK,WIN32
6 // CHECK64: %struct.S1 = type { i32, [4 x i8], [24 x i8] }
7 // WIN32: %struct.S1 = type { i32, [4 x i8], [24 x i8] }
8 // LIN32: %struct.S1 = type { i32, [20 x i8] }
9 // CHECK64: %struct.S2 = type { [40 x i8], i32, [4 x i8] }
10 // WIN32: %struct.S2 = type { [40 x i8], i32, [4 x i8] }
11 // LIN32: %struct.S2 = type { [36 x i8], i32 }
12 // LIN64: %struct.S3 = type { [17 x i8], [7 x i8] }
13 // WIN64: %struct.S3 = type { [24 x i8] }
15 //GH62207
// Regression test: a 1-bit unsigned _BitInt global must still lower to a
// full byte (i8) in memory, per the check line below.
// NOTE(review): the integer prefixed to each line is an artifact of the page
// this blob was scraped from, not part of the original test file.
16 unsigned _BitInt(1) GlobSize1 = 0;
17 // CHECK: @GlobSize1 = {{.*}}global i8 0
19 // CHECK64: @__const.foo.A = private unnamed_addr constant { i32, [4 x i8], <{ i8, [23 x i8] }> } { i32 1, [4 x i8] zeroinitializer, <{ i8, [23 x i8] }> <{ i8 -86, [23 x i8] zeroinitializer }> }, align 8
20 // @BigGlob = global [40 x i8] c"\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF", align 8
21 // CHECK64: @f.p = internal global <{ i8, i8, [22 x i8] }> <{ i8 16, i8 39, [22 x i8] zeroinitializer }>, align 8
// _Generic must treat _BitInt types that differ in width OR signedness as
// distinct types: each selection below resolves to a different association,
// verified through the constant stored into which/which2/which3.
// NOTE(review): the function's closing brace (original line 31) appears to
// have been lost in extraction; it exists in the upstream file.
23 void GenericTest(_BitInt(3) a, unsigned _BitInt(3) b, _BitInt(4) c) {
24 // CHECK: define {{.*}}void @GenericTest
25 int which = _Generic(a, _BitInt(3): 1, unsigned _BitInt(3) : 2, _BitInt(4) : 3);
26 // CHECK: store i32 1
27 int which2 = _Generic(b, _BitInt(3): 1, unsigned _BitInt(3) : 2, _BitInt(4) : 3);
28 // CHECK: store i32 2
29 int which3 = _Generic(c, _BitInt(3): 1, unsigned _BitInt(3) : 2, _BitInt(4) : 3);
30 // CHECK: store i32 3
// A _BitInt used as a VLA bound is converted to the target's index width:
// zero-extended when narrower (i3 -> index size) and truncated when wider
// (i99/i123 -> index size), as captured by the zext/trunc checks.
33 void VLATest(_BitInt(3) A, _BitInt(99) B, _BitInt(123) C) {
34 // CHECK: define {{.*}}void @VLATest
35 int AR1[A];
36 // CHECK: %[[A:.+]] = zext i3 %{{.+}} to i[[INDXSIZE:[0-9]+]]
37 // CHECK: %[[VLA1:.+]] = alloca i32, i[[INDXSIZE]] %[[A]]
38 int AR2[B];
39 // CHECK: %[[B:.+]] = trunc i99 %{{.+}} to i[[INDXSIZE]]
40 // CHECK: %[[VLA2:.+]] = alloca i32, i[[INDXSIZE]] %[[B]]
41 int AR3[C];
42 // CHECK: %[[C:.+]] = trunc i123 %{{.+}} to i[[INDXSIZE]]
43 // CHECK: %[[VLA3:.+]] = alloca i32, i[[INDXSIZE]] %[[C]]
// Layout probe used by OffsetOfTest below: a wide member (_BitInt(128))
// sandwiched between two narrow ones, so field offsets expose per-ABI
// alignment of _BitInt(128).
// NOTE(review): the closing "};" (original line 50) was lost in extraction.
46 struct S {
47 _BitInt(17) A;
48 _BitInt(128) B;
49 _BitInt(17) C;
// __builtin_offsetof on struct S: the stored constants show _BitInt(128)
// aligned to 8 bytes on 64-bit targets and Win32 (offsets 8/24), but only
// to 4 bytes on Linux i386 (offsets 4/20).
52 void OffsetOfTest(void) {
53 // CHECK: define {{.*}}void @OffsetOfTest
54 int A = __builtin_offsetof(struct S,A);
55 // CHECK: store i32 0, ptr %{{.+}}
56 int B = __builtin_offsetof(struct S,B);
57 // CHECK64: store i32 8, ptr %{{.+}}
58 // LIN32: store i32 4, ptr %{{.+}}
59 // WIN32: store i32 8, ptr %{{.+}}
60 int C = __builtin_offsetof(struct S,C);
61 // CHECK64: store i32 24, ptr %{{.+}}
62 // LIN32: store i32 20, ptr %{{.+}}
63 // WIN32: store i32 24, ptr %{{.+}}
// unsigned _BitInt(1) is passed as an i1 value but stored in memory as a
// whole byte: zext i1->i8 on store, trunc i8->i1 on load; an array of five
// such elements occupies [5 x i8].
66 void Size1ExtIntParam(unsigned _BitInt(1) A) {
67 // CHECK: define {{.*}}void @Size1ExtIntParam(i1{{.*}} %[[PARAM:.+]])
68 // CHECK: %[[PARAM_ADDR:.+]] = alloca i8
69 // CHECK: %[[B:.+]] = alloca [5 x i8]
70 // CHECK: %[[STOREDV:.+]] = zext i1 %[[PARAM]] to i8
71 // CHECK: store i8 %[[STOREDV]], ptr %[[PARAM_ADDR]]
72 unsigned _BitInt(1) B[5];
74 // CHECK: %[[PARAM_LOAD:.+]] = load i8, ptr %[[PARAM_ADDR]]
75 // CHECK: %[[LOADEDV:.+]] = trunc i8 %0 to i1
76 // CHECK: %[[IDX:.+]] = getelementptr inbounds [5 x i8], ptr %[[B]]
77 // CHECK: %[[STOREDV1:.+]] = zext i1 %[[LOADEDV]] to i8
78 // CHECK: store i8 %[[STOREDV1]], ptr %[[IDX]]
79 B[2] = A;
82 #if __BITINT_MAXWIDTH__ > 128
// Guarded by __BITINT_MAXWIDTH__ > 128 (see #if above): _BitInt wider than
// 128 bits. Per the checks in foo() below, the i129 member is stored in a
// padded in-memory type (i192 on 64-bit/Win32, i160 on Linux i386).
// NOTE(review): the closing "};" was lost in extraction.
83 struct S1 {
84 _BitInt(17) A;
85 _BitInt(129) B;
// Exercises load/store of the >128-bit member S1::B: stores sign-extend the
// i129 value to its storage width (i192 or i160 depending on ABI) and loads
// truncate back. The GEP index of B also differs per ABI (2 where an
// explicit padding field precedes it, 1 on Linux i386).
88 int foo(int a) {
89 // CHECK: %A1 = getelementptr inbounds nuw %struct.S1, ptr %B, i32 0, i32 0
90 // CHECK: store i32 1, ptr %A1
91 // CHECK64: %B2 = getelementptr inbounds nuw %struct.S1, ptr %B, i32 0, i32 2
92 // WIN32: %B2 = getelementptr inbounds nuw %struct.S1, ptr %B, i32 0, i32 2
93 // LIN32: %B2 = getelementptr inbounds nuw %struct.S1, ptr %B, i32 0, i32 1
94 // CHECK: %[[V1:.+]] = load i32, ptr %a.addr, align 4
95 // CHECK: %conv = sext i32 %[[V1]] to i129
96 // CHECK64: storedv = sext i129 %conv to i192
97 // WIN32: storedv = sext i129 %conv to i192
98 // LIN32: storedv = sext i129 %conv to i160
99 // CHECK64: store i192 %storedv, ptr %B2, align 8
100 // WIN32: store i192 %storedv, ptr %B2, align 8
101 // LIN32: store i160 %storedv, ptr %B2, align 4
102 // CHECK64: %B3 = getelementptr inbounds nuw %struct.S1, ptr %A, i32 0, i32 2
103 // WIN32: %B3 = getelementptr inbounds nuw %struct.S1, ptr %A, i32 0, i32 2
104 // LIN32: %B3 = getelementptr inbounds nuw %struct.S1, ptr %A, i32 0, i32 1
105 // CHECK64: %[[V2:.+]] = load i192, ptr %B3, align 8
106 // WIN32: %[[V2:.+]] = load i192, ptr %B3, align 8
107 // LIN32: %[[V2:.+]] = load i160, ptr %B3, align 4
108 // CHECK64: %loadedv = trunc i192 %[[V2]] to i129
109 // WIN32: %loadedv = trunc i192 %[[V2]] to i129
110 // LIN32: %loadedv = trunc i160 %[[V2]] to i129
111 // CHECK: %conv4 = trunc i129 %loadedv to i32
112 struct S1 A = {1, 170};
113 struct S1 B = {1, a};
114 return (int)A.B + (int)B.B;
// A _BitInt(257) member followed by an int: per the type checks at the top
// of the file, the 257-bit value occupies 40 bytes of storage, with the int
// placement/padding differing between 64-bit+Win32 and Linux i386.
// NOTE(review): the closing "};" was lost in extraction.
117 struct S2 {
118 _BitInt(257) A;
119 int B;
// Returning a _BitInt(257) on 64-bit targets uses an indirect sret return of
// the 40-byte storage; the i257 value is sign-extended to i320 (the storage
// width) before being stored into the sret slot.
122 _BitInt(257) bar() {
123 // CHECK64: define {{.*}}void @bar(ptr {{.*}} sret([40 x i8]) align 8 %[[RET:.+]])
124 // CHECK64: %A = alloca %struct.S2, align 8
125 // CHECK64: %0 = getelementptr inbounds { <{ i8, [39 x i8] }>, i32, [4 x i8] }, ptr %A, i32 0, i32 0
126 // CHECK64: %1 = getelementptr inbounds <{ i8, [39 x i8] }>, ptr %0, i32 0, i32 0
127 // CHECK64: store i8 1, ptr %1, align 8
128 // CHECK64: %2 = getelementptr inbounds { <{ i8, [39 x i8] }>, i32, [4 x i8] }, ptr %A, i32 0, i32 1
129 // CHECK64: store i32 10000, ptr %2, align 8
130 // CHECK64: %A1 = getelementptr inbounds nuw %struct.S2, ptr %A, i32 0, i32 0
131 // CHECK64: %3 = load i320, ptr %A1, align 8
132 // CHECK64: %loadedv = trunc i320 %3 to i257
133 // CHECK64: %storedv = sext i257 %loadedv to i320
134 // CHECK64: store i320 %storedv, ptr %[[RET]], align 8
135 struct S2 A = {1, 10000};
136 return A.A;
// va_arg of a _BitInt(160) on 64-bit targets reads the full 192-bit storage
// slot, truncates to the 160-bit value, then sign-extends back to 192 bits
// to store into the local.
139 void TakesVarargs(int i, ...) {
140 // CHECK64: define{{.*}} void @TakesVarargs(i32
141 __builtin_va_list args;
142 __builtin_va_start(args, i);
144 _BitInt(160) A = __builtin_va_arg(args, _BitInt(160));
145 // CHECK64: %[[ARG:.+]] = load i192
146 // CHECK64: %[[TRUNC:.+]] = trunc i192 %[[ARG]] to i160
147 // CHECK64: %[[SEXT:.+]] = sext i160 %[[TRUNC]] to i192
148 // CHECK64: store i192 %[[SEXT]], ptr %A, align 8
// Pointer arithmetic on _BitInt(129)* steps by the in-memory size of the
// type: a 24-byte stride on 64-bit targets, per the GEP check.
151 _BitInt(129) *f1(_BitInt(129) *p) {
152 // CHECK64: getelementptr inbounds [24 x i8], {{.*}} i64 1
153 return p + 1;
// Companion to f1: sizeof(_BitInt(129)) folds to 24 on 64-bit targets, so
// the char-pointer GEP advances by a constant 24 bytes.
156 char *f2(char *p) {
157 // CHECK64: getelementptr inbounds nuw i8, {{.*}} i64 24
158 return p + sizeof(_BitInt(129));
// C23 auto deduction at file scope: BigGlob has type _BitInt(257), value -1
// (all bits set — see the commented 40-byte \FF... initializer near the top).
161 auto BigGlob = (_BitInt(257))-1;
// Arithmetic on a global _BitInt(257): load the i320 storage, truncate to
// i257, add, then sign-extend back to i320 both for the local and for the
// sret return slot.
162 // CHECK64: define {{.*}}void @foobar(ptr {{.*}} sret([40 x i8]) align 8 %[[RET1:.+]])
163 _BitInt(257) foobar() {
164 // CHECK64: %A = alloca [40 x i8], align 8
165 // CHECK64: %0 = load i320, ptr @BigGlob, align 8
166 // CHECK64: %loadedv = trunc i320 %0 to i257
167 // CHECK64: %add = add nsw i257 %loadedv, 1
168 // CHECK64: %storedv = sext i257 %add to i320
169 // CHECK64: store i320 %storedv, ptr %A, align 8
170 // CHECK64: %1 = load i320, ptr %A, align 8
171 // CHECK64: %loadedv1 = trunc i320 %1 to i257
172 // CHECK64: %storedv2 = sext i257 %loadedv1 to i320
173 // CHECK64: store i320 %storedv2, ptr %[[RET1]], align 8
174 _BitInt(257) A = BigGlob + 1;
175 return A;
// A function-local static _BitInt(130): its constant initializer is checked
// against the @f.p global near the top of the file (16 + 39*256 = 10000
// packed into the padded storage).
178 void f() {
179 static _BitInt(130) p = {10000};
// A 129-bit bit-field declared on a _BitInt(136) member; its layout is
// checked by the %struct.S3 type lines at the top of the file (17+7 byte
// split on Linux x86-64 vs a single 24-byte unit on Win64).
// NOTE(review): the closing "};" was lost in extraction.
182 struct S3 {
183 _BitInt (136) A : 129;
// Bit-field access on _BitInt storage: reading s.A sign-extends the 129-bit
// field via shl/ashr of the i136 load; writing the 48-bit field of the
// anonymous struct masks the value into i64 storage (and of the low 48 bits,
// or with the cleared load), both for the initializer and the assignment.
186 void bitField() {
187 struct S3 s = {1};
188 struct {
189 _BitInt (136) A : 48;
190 int a;
191 } s1 = {s.A};
192 s1.A = 36;
193 // LIN64: %s = alloca %struct.S3, align 8
194 // LIN64: %s1 = alloca %struct.anon, align 8
195 // LIN64: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %s, ptr align 8 @__const.bitField.s, i64 24, i1 false)
196 // LIN64: %bf.load = load i136, ptr %s, align 8
197 // LIN64: %bf.shl = shl i136 %bf.load, 7
198 // LIN64: %bf.ashr = ashr i136 %bf.shl, 7
199 // LIN64: %0 = trunc i136 %bf.ashr to i64
200 // LIN64: %bf.load1 = load i64, ptr %s1, align 8
201 // LIN64: %bf.value = and i64 %0, 281474976710655
202 // LIN64: %bf.clear = and i64 %bf.load1, -281474976710656
203 // LIN64: %bf.set = or i64 %bf.clear, %bf.value
204 // LIN64: store i64 %bf.set, ptr %s1, align 8
205 // LIN64: %a = getelementptr inbounds nuw %struct.anon, ptr %s1, i32 0, i32 1
206 // LIN64: store i32 0, ptr %a, align 8
207 // LIN64: %bf.load2 = load i64, ptr %s1, align 8
208 // LIN64: %bf.clear3 = and i64 %bf.load2, -281474976710656
209 // LIN64: %bf.set4 = or i64 %bf.clear3, 36
210 // LIN64: store i64 %bf.set4, ptr %s1, align 8
213 #endif