Revert "[llvm] Improve llvm.objectsize computation by computing GEP, alloca and mallo...
[llvm-project.git] / clang / test / CodeGen / bitfield-2.c
blob8688ba6390ddbee311ec840843780df7a383134b
// RUN: %clang_cc1 -emit-llvm -triple x86_64 -O3 -o %t.opt.ll %s \
// RUN:   -fdump-record-layouts > %t.dump.txt
// RUN: FileCheck -check-prefix=CHECK-RECORD < %t.dump.txt %s
// RUN: FileCheck -check-prefix=CHECK-OPT < %t.opt.ll %s
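
// The test is verified two ways: CHECK-RECORD lines match the record layout
// dump produced by -fdump-record-layouts, and CHECK-OPT lines verify that the
// -O3 pipeline folds each test_N() harness down to the expected constant.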

/****/

// Check that we don't read off the end of a packed 24-bit structure.
// PR6176

// CHECK-RECORD: *** Dumping IRgen Record Layout
// CHECK-RECORD: Record: RecordDecl{{.*}}s0
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD:   LLVMType:%struct.s0 = type { [3 x i8] }
// CHECK-RECORD:   IsZeroInitializable:1
// CHECK-RECORD:   BitFields:[
// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:24 IsSigned:1 StorageSize:24 StorageOffset:0
struct __attribute((packed)) s0 {
  int f0 : 24;
};
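
// With StorageSize:24 the field lives in a 3-byte (i24) storage unit, so a
// load of f0 touches only the 3 bytes that actually exist in the object and
// never reads a nonexistent 4th byte.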

struct s0 g0 = { 0xdeadbeef };

int f0_load(struct s0 *a0) {
  int size_check[sizeof(struct s0) == 3 ? 1 : -1];
  return a0->f0;
}

int f0_store(struct s0 *a0) {
  return (a0->f0 = 1);
}

int f0_reload(struct s0 *a0) {
  return (a0->f0 += 1);
}

// CHECK-OPT-LABEL: define{{.*}} i64 @test_0()
// CHECK-OPT: ret i64 1
// CHECK-OPT: }
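// The XOR chain folds: the initial field value feeds res twice (directly and
// via f0_load's return), and the post-reload value 2 also appears twice
// (f0_reload's return and the final res ^= g0.f0), so after cancellation
// only the stored constant 1 survives.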
unsigned long long test_0(void) {
  struct s0 g0 = { 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g0.f0;
  res ^= f0_load(&g0) ^ f0_store(&g0) ^ f0_reload(&g0);
  res ^= g0.f0;
  return res;
}

/****/

// PR5591

// CHECK-RECORD: *** Dumping IRgen Record Layout
// CHECK-RECORD: Record: RecordDecl{{.*}}s1
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD:   LLVMType:%struct.s1 = type { [3 x i8] }
// CHECK-RECORD:   IsZeroInitializable:1
// CHECK-RECORD:   BitFields:[
// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0
// CHECK-RECORD:     <CGBitFieldInfo Offset:10 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0

#pragma pack(push)
#pragma pack(1)
struct __attribute((packed)) s1 {
  signed f0 : 10;
  signed f1 : 10;
};
#pragma pack(pop)
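
// Both 10-bit fields share a single 24-bit storage unit (both CGBitFieldInfo
// entries above have StorageOffset:0); f1 sits at bit offset 10 inside the
// 3-byte struct.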

struct s1 g1 = { 0xdeadbeef, 0xdeadbeef };

int f1_load(struct s1 *a0) {
  int size_check[sizeof(struct s1) == 3 ? 1 : -1];
  return a0->f1;
}

int f1_store(struct s1 *a0) {
  return (a0->f1 = 1234);
}

int f1_reload(struct s1 *a0) {
  return (a0->f1 += 1234);
}

// CHECK-OPT-LABEL: define{{.*}} i64 @test_1()
// CHECK-OPT: ret i64 210
// CHECK-OPT: }
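// Storing 1234 into a signed 10-bit field keeps only the low ten bits:
// 1234 & 0x3FF == 210, which is the value left after the XOR cancellations.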
unsigned long long test_1(void) {
  struct s1 g1 = { 0xdeadbeef, 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g1.f0 ^ g1.f1;
  res ^= f1_load(&g1) ^ f1_store(&g1) ^ f1_reload(&g1);
  res ^= g1.f0 ^ g1.f1;
  return res;
}

/****/

// Check that we don't access beyond the bounds of a union.
// PR5567

// CHECK-RECORD: *** Dumping IRgen Record Layout
// CHECK-RECORD: Record: RecordDecl{{.*}}u2
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD:   LLVMType:%union.u2 = type { i8 }
// CHECK-RECORD:   IsZeroInitializable:1
// CHECK-RECORD:   BitFields:[
// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageOffset:0

union __attribute__((packed)) u2 {
  unsigned long long f0 : 3;
};
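
// Despite the unsigned long long declared type, the 3-bit field is accessed
// through a single-byte (i8) storage unit, so the 1-byte union is never read
// or written past its end.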

union u2 g2 = { 0xdeadbeef };

int f2_load(union u2 *a0) {
  return a0->f0;
}

int f2_store(union u2 *a0) {
  return (a0->f0 = 1234);
}

int f2_reload(union u2 *a0) {
  return (a0->f0 += 1234);
}

// CHECK-OPT-LABEL: define{{.*}} i64 @test_2()
// CHECK-OPT: ret i64 2
// CHECK-OPT: }
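// Only the stored value survives the XOR chain: 1234 & 0x7 == 2.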
unsigned long long test_2(void) {
  union u2 g2 = { 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g2.f0;
  res ^= f2_load(&g2) ^ f2_store(&g2) ^ f2_reload(&g2);
  res ^= g2.f0;
  return res;
}

/***/

// PR5039

struct s3 {
  long long f0 : 32;
  long long f1 : 32;
};

struct s3 g3 = { 0xdeadbeef, 0xdeadbeef };

int f3_load(struct s3 *a0) {
  a0->f0 = 1;
  return a0->f0;
}

int f3_store(struct s3 *a0) {
  a0->f0 = 1;
  return (a0->f0 = 1234);
}

int f3_reload(struct s3 *a0) {
  a0->f0 = 1;
  return (a0->f0 += 1234);
}

// CHECK-OPT-LABEL: define{{.*}} i64 @test_3()
// CHECK-OPT: ret i64 -559039940
// CHECK-OPT: }
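// f0 and f1 start out equal and cancel, and 1 ^ 1234 ^ 1235 == 0, so res
// folds to 1235 ^ 0xFFFFFFFFDEADBEEF (f1's initial value sign-extended to 64
// bits), i.e. -559039940.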
unsigned long long test_3(void) {
  struct s3 g3 = { 0xdeadbeef, 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g3.f0 ^ g3.f1;
  res ^= f3_load(&g3) ^ f3_store(&g3) ^ f3_reload(&g3);
  res ^= g3.f0 ^ g3.f1;
  return res;
}

/***/

// This is a case where the bitfield access will straddle an alignment boundary
// of its underlying type.

struct s4 {
  unsigned f0 : 16;
  unsigned f1 : 28 __attribute__((packed));
};
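
// f0 occupies bits 0-15 and the packed f1 occupies bits 16-43, so f1 crosses
// the 32-bit alignment boundary of its underlying unsigned type.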

struct s4 g4 = { 0xdeadbeef, 0xdeadbeef };

int f4_load(struct s4 *a0) {
  return a0->f0 ^ a0->f1;
}

int f4_store(struct s4 *a0) {
  return (a0->f0 = 1234) ^ (a0->f1 = 5678);
}

int f4_reload(struct s4 *a0) {
  return (a0->f0 += 1234) ^ (a0->f1 += 5678);
}

// CHECK-OPT-LABEL: define{{.*}} i64 @test_4()
// CHECK-OPT: ret i64 4860
// CHECK-OPT: }
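// Everything except the two stored values cancels in the XOR chain:
// 1234 ^ 5678 == 4860.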
unsigned long long test_4(void) {
  struct s4 g4 = { 0xdeadbeef, 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g4.f0 ^ g4.f1;
  res ^= f4_load(&g4) ^ f4_store(&g4) ^ f4_reload(&g4);
  res ^= g4.f0 ^ g4.f1;
  return res;
}

/***/

struct s5 {
  unsigned f0 : 2;
  _Bool f1 : 1;
  _Bool f2 : 1;
};

struct s5 g5 = { 0xdeadbeef, 0xdeadbeef };

int f5_load(struct s5 *a0) {
  return a0->f0 ^ a0->f1;
}

int f5_store(struct s5 *a0) {
  return (a0->f0 = 0xF) ^ (a0->f1 = 0xF) ^ (a0->f2 = 0xF);
}

int f5_reload(struct s5 *a0) {
  return (a0->f0 += 0xF) ^ (a0->f1 += 0xF) ^ (a0->f2 += 0xF);
}

// CHECK-OPT-LABEL: define{{.*}} i64 @test_5()
// CHECK-OPT: ret i64 2
// CHECK-OPT: }
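// After the reloads f0 holds (3 + 0xF) & 0x3 == 2 and both _Bool fields hold
// 1; the rest of the XOR chain cancels, leaving 2.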
unsigned long long test_5(void) {
  struct s5 g5 = { 0xdeadbeef, 0xdeadbeef, 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g5.f0 ^ g5.f1 ^ g5.f2;
  res ^= f5_load(&g5) ^ f5_store(&g5) ^ f5_reload(&g5);
  res ^= g5.f0 ^ g5.f1 ^ g5.f2;
  return res;
}

/***/

struct s6 {
  unsigned f0 : 2;
};

struct s6 g6 = { 0xF };

int f6_load(struct s6 *a0) {
  return a0->f0;
}

int f6_store(struct s6 *a0) {
  return a0->f0 = 0x0;
}

int f6_reload(struct s6 *a0) {
  return (a0->f0 += 0xF);
}

// CHECK-OPT-LABEL: define{{.*}} zeroext i1 @test_6()
// CHECK-OPT: ret i1 true
// CHECK-OPT: }
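// res ends at g6.f0 == 3 (0xF truncated to two bits), and 3 converts to true.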
_Bool test_6(void) {
  struct s6 g6 = { 0xF };
  unsigned long long res = 0;
  res ^= g6.f0;
  res ^= f6_load(&g6);
  res ^= g6.f0;
  return res;
}

/***/

// Check that we compute the best alignment possible for each access.

// CHECK-RECORD: *** Dumping IRgen Record Layout
// CHECK-RECORD: Record: RecordDecl{{.*}}s7
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD:   LLVMType:%struct.s7 = type <{ i32, i32, i32, i64, [12 x i8] }>
// CHECK-RECORD:   IsZeroInitializable:1
// CHECK-RECORD:   BitFields:[
// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:64 StorageOffset:12
// CHECK-RECORD:     <CGBitFieldInfo Offset:32 Size:29 IsSigned:1 StorageSize:64 StorageOffset:12

struct __attribute__((aligned(16))) s7 {
  int a, b, c;
  int f0 : 5;
  int f1 : 29;
};
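
// The 64-bit bit-field storage unit starts at byte 12 (StorageOffset:12), so
// even though the struct itself is 16-byte aligned, the unit is only
// guaranteed 4-byte alignment and the access must be emitted accordingly.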

int f7_load(struct s7 *a0) {
  return a0->f0;
}

/***/

// This is a case where we narrow the access width immediately.

struct __attribute__((packed)) s8 {
  char f0 : 4;
  char f1;
  int f2 : 4;
  char f3 : 4;
};
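
// f2 is declared int, but the packed struct is only 3 bytes, so the 4-byte
// access width suggested by f2's declared type would read past the end of
// the object; the access is narrowed immediately instead.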

struct s8 g8 = { 0xF };

int f8_load(struct s8 *a0) {
  return a0->f0 ^ a0->f2 ^ a0->f3;
}

int f8_store(struct s8 *a0) {
  return (a0->f0 = 0xFD) ^ (a0->f2 = 0xFD) ^ (a0->f3 = 0xFD);
}

int f8_reload(struct s8 *a0) {
  return (a0->f0 += 0xFD) ^ (a0->f2 += 0xFD) ^ (a0->f3 += 0xFD);
}

// CHECK-OPT-LABEL: define{{.*}} i32 @test_8()
// CHECK-OPT: ret i32 -3
// CHECK-OPT: }
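// 0xFD truncates to 0xD in each 4-bit field; with plain char signed on this
// target that reads back as -3, and -3 ^ -3 ^ -3 == -3 once the rest of the
// chain cancels.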
unsigned test_8(void) {
  struct s8 g8 = { 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g8.f0 ^ g8.f2 ^ g8.f3;
  res ^= f8_load(&g8) ^ f8_store(&g8) ^ f8_reload(&g8);
  res ^= g8.f0 ^ g8.f2 ^ g8.f3;
  return res;
}

/***/

// This is another case where we narrow the access width immediately.
struct __attribute__((packed)) s9 {
  unsigned f0 : 7;
  unsigned f1 : 7;
  unsigned f2 : 7;
  unsigned f3 : 7;
  unsigned f4 : 7;
  unsigned f5 : 7;
  unsigned f6 : 7;
  unsigned f7 : 7;
};
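
// The eight 7-bit fields pack into 56 bits (7 bytes); f7 occupies bits 49-55,
// entirely within the final byte, so its load can be narrowed to a single
// byte instead of a wider access that would run past the end of the struct.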

int f9_load(struct s9 *a0) {
  return a0->f7;
}