// RUN: %clang_cc1 -emit-llvm -triple x86_64 -O3 -o %t.opt.ll %s \
// RUN: -fdump-record-layouts > %t.dump.txt
// RUN: FileCheck -check-prefix=CHECK-RECORD < %t.dump.txt %s
// RUN: FileCheck -check-prefix=CHECK-OPT < %t.opt.ll %s

// Check that we don't read off the end of a packed 24-bit structure.
// CHECK-RECORD: *** Dumping IRgen Record Layout
// CHECK-RECORD: Record: RecordDecl{{.*}}s0
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD: LLVMType:%struct.s0 = type { [3 x i8] }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:24 IsSigned:1 StorageSize:24 StorageOffset:0
// A packed 24-bit record: a single signed 24-bit bit-field stored in 3 bytes.
// NOTE(review): the field list was dropped by the extraction; it is
// reconstructed from the CHECK-RECORD expectations above (exactly one
// bit-field, Offset:0 Size:24 IsSigned:1 StorageSize:24) and the
// sizeof(struct s0) == 3 compile-time check in f0_load below.
struct __attribute((packed)) s0 {
  int f0 : 24;
};

// Global instance; the initializer is truncated to the low 24 bits of f0.
struct s0 g0 = { 0xdeadbeef };
24 int f0_load(struct s0
*a0
) {
25 int size_check
[sizeof(struct s0
) == 3 ? 1 : -1];
// NOTE(review): the bodies of f0_store and f0_reload were dropped when this
// listing was extracted -- only the signatures survive (with stray original
// line numbers fused into the text).  Presumably they assign to / increment
// the bit-field and return the stored value, like f1_store/f1_reload below;
// restore from the original file before use.
28 int f0_store(struct s0
*a0
) {
31 int f0_reload(struct s0
*a0
) {
// CHECK-OPT-LABEL: define{{.*}} i64 @test_0()
// CHECK-OPT: ret i64 1
// Fold the s0 helpers into one XOR accumulator so -O3 can constant-fold the
// whole function (the CHECK-OPT lines above pin the folded result).
// NOTE(review): this function is truncated -- the original line numbers jump
// (…40, 42, then nothing), so at least one statement before the XOR line and
// the trailing "return res; }" were dropped by the extraction.
38 unsigned long long test_0(void) {
39 struct s0 g0
= { 0xdeadbeef };
40 unsigned long long res
= 0;
42 res
^= f0_load(&g0
) ^ f0_store(&g0
) ^ f0_reload(&g0
);
// CHECK-RECORD: *** Dumping IRgen Record Layout
// CHECK-RECORD: Record: RecordDecl{{.*}}s1
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD: LLVMType:%struct.s1 = type { [3 x i8] }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0
// CHECK-RECORD: <CGBitFieldInfo Offset:10 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0
// A packed record with two signed 10-bit bit-fields (20 bits -> 3 bytes).
// NOTE(review): the field list was dropped by the extraction; it is
// reconstructed from the CHECK-RECORD expectations above (two signed fields,
// Offset:0 Size:10 and Offset:10 Size:10), the sizeof == 3 check in f1_load,
// and the f1 member name used by f1_store/f1_reload below.
struct __attribute((packed)) s1 {
  int f0 : 10;
  int f1 : 10;
};

// Global instance; each initializer is truncated into its 10-bit field.
struct s1 g1 = { 0xdeadbeef, 0xdeadbeef };
// Load helper for s1; size_check is a compile-time assertion that the packed
// struct is exactly 3 bytes.
// NOTE(review): truncated -- the return statement and closing brace were
// dropped by the extraction (presumably a read of the bit-fields, as in the
// intact f4_load/f8_load below); restore from the original file before use.
70 int f1_load(struct s1
*a0
) {
71 int size_check
[sizeof(struct s1
) == 3 ? 1 : -1];
74 int f1_store(struct s1
*a0
) {
75 return (a0
->f1
= 1234);
77 int f1_reload(struct s1
*a0
) {
78 return (a0
->f1
+= 1234);
// CHECK-OPT-LABEL: define{{.*}} i64 @test_1()
// CHECK-OPT: ret i64 210
// Fold the s1 helpers into one XOR accumulator for constant-folding under
// -O3 (the CHECK-OPT lines above pin the result).
// NOTE(review): truncated -- the original line numbers jump (…86, 88, then
// nothing), so at least one statement and the trailing "return res; }" were
// dropped by the extraction.
84 unsigned long long test_1(void) {
85 struct s1 g1
= { 0xdeadbeef, 0xdeadbeef };
86 unsigned long long res
= 0;
88 res
^= f1_load(&g1
) ^ f1_store(&g1
) ^ f1_reload(&g1
);
// Check that we don't access beyond the bounds of a union.

// CHECK-RECORD: *** Dumping IRgen Record Layout
// CHECK-RECORD: Record: RecordDecl{{.*}}u2
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD: LLVMType:%union.u2 = type { i8 }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageOffset:0
// A packed union whose only member is an unsigned 3-bit field carried in a
// single byte (see the CHECK-RECORD block above: StorageSize:8, Size:3,
// IsSigned:0).
union __attribute__((packed)) u2 {
  unsigned long long f0 : 3;
}; // NOTE(review): closing brace restored after truncated extraction

// Global instance; the initializer is truncated to the low 3 bits.
union u2 g2 = { 0xdeadbeef };
// NOTE(review): body lost in extraction -- only the signature survives.
// Presumably returns a0->f0 like the other fN_load helpers; restore from the
// original file before use.
113 int f2_load(union u2
*a0
) {
116 int f2_store(union u2
*a0
) {
117 return (a0
->f0
= 1234);
119 int f2_reload(union u2
*a0
) {
120 return (a0
->f0
+= 1234);
// CHECK-OPT-LABEL: define{{.*}} i64 @test_2()
// CHECK-OPT: ret i64 2
// Fold the u2 helpers into one XOR accumulator for constant-folding under
// -O3 (the CHECK-OPT lines above pin the result).
// NOTE(review): truncated -- the original line numbers jump (…128, 130, then
// nothing), so at least one statement and the trailing "return res; }" were
// dropped by the extraction.
126 unsigned long long test_2(void) {
127 union u2 g2
= { 0xdeadbeef };
128 unsigned long long res
= 0;
130 res
^= f2_load(&g2
) ^ f2_store(&g2
) ^ f2_reload(&g2
);
144 struct s3 g3
= { 0xdeadbeef, 0xdeadbeef };
// NOTE(review): body lost in extraction -- only the signature survives.
// Restore the original return statement before use.
146 int f3_load(struct s3
*a0
) {
150 int f3_store(struct s3
*a0
) {
152 return (a0
->f0
= 1234);
154 int f3_reload(struct s3
*a0
) {
156 return (a0
->f0
+= 1234);
// CHECK-OPT-LABEL: define{{.*}} i64 @test_3()
// CHECK-OPT: ret i64 -559039940
162 unsigned long long test_3(void) {
163 struct s3 g3
= { 0xdeadbeef, 0xdeadbeef };
164 unsigned long long res
= 0;
165 res
^= g3
.f0
^ g3
.f1
;
166 res
^= f3_load(&g3
) ^ f3_store(&g3
) ^ f3_reload(&g3
);
167 res
^= g3
.f0
^ g3
.f1
;
// This is a case where the bitfield access will straddle an alignment boundary
// of its underlying type.
// NOTE(review): orphaned fragment -- this is one field of struct s4 whose
// enclosing definition (header, other fields, closing "};") was dropped by
// the extraction.  The per-field packed attribute makes this 28-bit field
// start immediately after the previous member, straddling a 4-byte boundary.
178 unsigned f1
: 28 __attribute__ ((packed
));
181 struct s4 g4
= { 0xdeadbeef, 0xdeadbeef };
183 int f4_load(struct s4
*a0
) {
184 return a0
->f0
^ a0
->f1
;
186 int f4_store(struct s4
*a0
) {
187 return (a0
->f0
= 1234) ^ (a0
->f1
= 5678);
189 int f4_reload(struct s4
*a0
) {
190 return (a0
->f0
+= 1234) ^ (a0
->f1
+= 5678);
// CHECK-OPT-LABEL: define{{.*}} i64 @test_4()
// CHECK-OPT: ret i64 4860
196 unsigned long long test_4(void) {
197 struct s4 g4
= { 0xdeadbeef, 0xdeadbeef };
198 unsigned long long res
= 0;
199 res
^= g4
.f0
^ g4
.f1
;
200 res
^= f4_load(&g4
) ^ f4_store(&g4
) ^ f4_reload(&g4
);
201 res
^= g4
.f0
^ g4
.f1
;
213 struct s5 g5
= { 0xdeadbeef, 0xdeadbeef };
215 int f5_load(struct s5
*a0
) {
216 return a0
->f0
^ a0
->f1
;
218 int f5_store(struct s5
*a0
) {
219 return (a0
->f0
= 0xF) ^ (a0
->f1
= 0xF) ^ (a0
->f2
= 0xF);
221 int f5_reload(struct s5
*a0
) {
222 return (a0
->f0
+= 0xF) ^ (a0
->f1
+= 0xF) ^ (a0
->f2
+= 0xF);
// CHECK-OPT-LABEL: define{{.*}} i64 @test_5()
// CHECK-OPT: ret i64 2
228 unsigned long long test_5(void) {
229 struct s5 g5
= { 0xdeadbeef, 0xdeadbeef, 0xdeadbeef };
230 unsigned long long res
= 0;
231 res
^= g5
.f0
^ g5
.f1
^ g5
.f2
;
232 res
^= f5_load(&g5
) ^ f5_store(&g5
) ^ f5_reload(&g5
);
233 res
^= g5
.f0
^ g5
.f1
^ g5
.f2
;
243 struct s6 g6
= { 0xF };
// NOTE(review): the bodies of f6_load and f6_store were dropped when this
// listing was extracted -- only the signatures survive.  Restore the original
// return statements before use.
245 int f6_load(struct s6
*a0
) {
248 int f6_store(struct s6
*a0
) {
251 int f6_reload(struct s6
*a0
) {
252 return (a0
->f0
+= 0xF);
// CHECK-OPT-LABEL: define{{.*}} zeroext i1 @test_6()
// CHECK-OPT: ret i1 true
// NOTE(review): orphaned fragment -- the enclosing function header for
// test_6 (a _Bool-returning function per the CHECK-OPT lines above) and its
// tail were lost in extraction; only these interior declarations survive.
259 struct s6 g6
= { 0xF };
260 unsigned long long res
= 0;
// Check that we compute the best alignment possible for each access.

// CHECK-RECORD: *** Dumping IRgen Record Layout
// CHECK-RECORD: Record: RecordDecl{{.*}}s7
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD: LLVMType:%struct.s7 = type <{ i32, i32, i32, i64, [12 x i8] }>
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:64 StorageOffset:12
// CHECK-RECORD: <CGBitFieldInfo Offset:32 Size:29 IsSigned:1 StorageSize:64 StorageOffset:12
// NOTE(review): truncated -- struct s7's field list and closing "};" (per the
// CHECK-RECORD lines above: three i32 members, then a signed 5-bit and a
// signed 29-bit bit-field sharing 64-bit storage at offset 12) and the body
// of f7_load were all dropped by the extraction; restore before use.
280 struct __attribute__((aligned(16))) s7
{
286 int f7_load(struct s7
*a0
) {
// This is a case where we narrow the access width immediately.
// NOTE(review): truncated -- struct s8's field list and closing "};" were
// dropped by the extraction (per f8_load below it has at least members f0,
// f2 and f3); restore before use.
294 struct __attribute__((packed
)) s8
{
301 struct s8 g8
= { 0xF };
303 int f8_load(struct s8
*a0
) {
304 return a0
->f0
^ a0
->f2
^ a0
->f3
;
306 int f8_store(struct s8
*a0
) {
307 return (a0
->f0
= 0xFD) ^ (a0
->f2
= 0xFD) ^ (a0
->f3
= 0xFD);
309 int f8_reload(struct s8
*a0
) {
310 return (a0
->f0
+= 0xFD) ^ (a0
->f2
+= 0xFD) ^ (a0
->f3
+= 0xFD);
// CHECK-OPT-LABEL: define{{.*}} i32 @test_8()
// CHECK-OPT: ret i32 -3
316 unsigned test_8(void) {
317 struct s8 g8
= { 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef };
318 unsigned long long res
= 0;
319 res
^= g8
.f0
^ g8
.f2
^ g8
.f3
;
320 res
^= f8_load(&g8
) ^ f8_store(&g8
) ^ f8_reload(&g8
);
321 res
^= g8
.f0
^ g8
.f2
^ g8
.f3
;
// This is another case where we narrow the access width immediately.
// NOTE(review): truncated -- struct s9's field list and closing "};" were
// dropped by the extraction, and the following f9_load runs past the end of
// this chunk; restore from the original file before use.
328 struct __attribute__((packed
)) s9
{
339 int f9_load(struct s9
*a0
) {