// RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NOCOMPAT
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-apple-darwin10 -fclang-abi-compat=6.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V6COMPAT
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-scei-ps4 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V6COMPAT
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-sie-ps5 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V6COMPAT
extern int int_source();
extern void int_sink(int x);
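
namespace test0 {
  // Definitions reconstructed from the CHECK lines below: A must occupy
  // bytes 0-7 of C so that B's bitfield lands at offset 8, and 'onebit'
  // must be a 2-bit field (store mask 3, shl/ashr by 6); the 'and -4'
  // shows the remaining six bits of the byte are live. Member names
  // other than 'onebit' are illustrative.
  struct A {
    int aField;
    int bField;
  };

  struct B {
    int onebit : 2;
    int twobit : 6;
    int intField;
  };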
  struct __attribute__((packed, aligned(2))) C : A, B {
  };

  // These accesses should have alignment 4 because they're at offset 0
  // in a reference with an assumed alignment of 4.
  // CHECK-LABEL: @_ZN5test01aERNS_1BE
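  void a(B &b) {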
    // CHECK: [[CALL:%.*]] = call noundef i32 @_Z10int_sourcev()
    // CHECK: [[B_P:%.*]] = load ptr, ptr
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, ptr [[B_P]], align 4
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], ptr [[B_P]], align 4
    b.onebit = int_source();

    // CHECK: [[B_P:%.*]] = load ptr, ptr
    // CHECK: [[VALUE:%.*]] = load i8, ptr [[B_P]], align 4
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 noundef [[T2]])
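    int_sink(b.onebit);
  }
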
  // These accesses should have alignment 2 because they're at offset 8
  // in a reference/pointer with an assumed alignment of 2.
  // CHECK-LABEL: @_ZN5test01bERNS_1CE
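  void b(C &c) {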
    // CHECK: [[CALL:%.*]] = call noundef i32 @_Z10int_sourcev()
    // CHECK: [[C_P:%.*]] = load ptr, ptr
    // CHECK: [[FIELD_P:%.*]] = getelementptr inbounds i8, ptr [[C_P]], i64 8
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK-V6COMPAT: [[OLD_VALUE:%.*]] = load i8, ptr [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[OLD_VALUE:%.*]] = load i8, ptr [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK-V6COMPAT: store i8 [[T2]], ptr [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: store i8 [[T2]], ptr [[FIELD_P]], align 4
    c.onebit = int_source();

    // CHECK: [[C_P:%.*]] = load ptr, ptr
    // CHECK: [[FIELD_P:%.*]] = getelementptr inbounds i8, ptr [[C_P]], i64 8
    // CHECK-V6COMPAT: [[VALUE:%.*]] = load i8, ptr [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[VALUE:%.*]] = load i8, ptr [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 noundef [[T2]])
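    int_sink(c.onebit);
  }
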
  // CHECK-LABEL: @_ZN5test01cEPNS_1CE
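  void c(C *c) {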
    // CHECK: [[CALL:%.*]] = call noundef i32 @_Z10int_sourcev()
    // CHECK: [[C_P:%.*]] = load ptr, ptr
    // CHECK: [[FIELD_P:%.*]] = getelementptr inbounds i8, ptr [[C_P]], i64 8
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK-V6COMPAT: [[OLD_VALUE:%.*]] = load i8, ptr [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[OLD_VALUE:%.*]] = load i8, ptr [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK-V6COMPAT: store i8 [[T2]], ptr [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: store i8 [[T2]], ptr [[FIELD_P]], align 4
    c->onebit = int_source();

    // CHECK: [[C_P:%.*]] = load ptr, ptr
    // CHECK: [[P:%.*]] = getelementptr inbounds i8, ptr [[C_P]], i64 8
    // CHECK-V6COMPAT: [[VALUE:%.*]] = load i8, ptr [[P]], align 2
    // CHECK-NOCOMPAT: [[VALUE:%.*]] = load i8, ptr [[P]], align 4
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 noundef [[T2]])
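    int_sink(c->onebit);
  }
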
  // These accesses should have alignment 2 because they're at offset 8
  // in an alignment-2 variable.
  // CHECK-LABEL: @_ZN5test01dEv
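  void d() {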
    // CHECK-V6COMPAT: [[C_P:%.*]] = alloca [[C:%.*]], align 2
    // CHECK-NOCOMPAT: [[C_P:%.*]] = alloca [[C:%.*]], align 4
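    C c;
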
    // CHECK: [[CALL:%.*]] = call noundef i32 @_Z10int_sourcev()
    // CHECK: [[FIELD_P:%.*]] = getelementptr inbounds i8, ptr [[C_P]], i64 8
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK-V6COMPAT: [[OLD_VALUE:%.*]] = load i8, ptr [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: [[OLD_VALUE:%.*]] = load i8, ptr [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK-V6COMPAT: store i8 [[T2]], ptr [[FIELD_P]], align 2
    // CHECK-NOCOMPAT: store i8 [[T2]], ptr [[FIELD_P]], align 4
    c.onebit = int_source();

    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, ptr [[C_P]], i64 8
    // CHECK-V6COMPAT: [[VALUE:%.*]] = load i8, ptr [[T1]], align 2
    // CHECK-NOCOMPAT: [[VALUE:%.*]] = load i8, ptr [[T1]], align 4
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 noundef [[T2]])
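    int_sink(c.onebit);
  }
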
  // These accesses should have alignment 8 because they're at offset 8
  // in an alignment-16 variable.
  // CHECK-LABEL: @_ZN5test01eEv
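  void e() {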
    // CHECK: [[C_P:%.*]] = alloca [[C:%.*]], align 16
    __attribute__((aligned(16))) C c;

    // CHECK: [[CALL:%.*]] = call noundef i32 @_Z10int_sourcev()
    // CHECK: [[FIELD_P:%.*]] = getelementptr inbounds i8, ptr [[C_P]], i64 8
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, ptr [[FIELD_P]], align 8
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], ptr [[FIELD_P]], align 8
    c.onebit = int_source();

    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, ptr [[C_P]], i64 8
    // CHECK: [[VALUE:%.*]] = load i8, ptr [[T1]], align 8
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 noundef [[T2]])
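    int_sink(c.onebit);
  }
}

namespace test1 {
  // Array must be 16 bytes, per the memcpy sizes in the CHECK lines;
  // the element name is illustrative.
  struct Array {
    int elements[4];
  };
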
  struct A {
    __attribute__((aligned(16))) Array aArray;
  };

  struct B : virtual A {
    void *bPointer; // puts bArray at offset 16
    Array bArray;
  };

  struct C : virtual A { // must be viable as primary base
    // Non-empty, nv-size not a multiple of 16.
    void *cPointer1; // (member names illustrative; a vptr plus two
    void *cPointer2; //  pointers give C an nv-size of 24)
  };

  // Proof of concept that the non-virtual components of B do not have
  // to be 16-byte-aligned.
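  struct D : C, B {};
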
  // For the following tests, we want to assign into a variable whose
  // alignment is high enough that it will absolutely not be the
  // constraint on the memcpy alignment.
  typedef __attribute__((aligned(64))) Array AlignedArray;

  // CHECK-LABEL: @_ZN5test11aERNS_1AE
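  void a(A &a) {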
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY:%.*]], align 64
    // CHECK: [[A_P:%.*]] = load ptr, ptr
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A:%.*]], ptr [[A_P]], i32 0, i32 0
    // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 64 [[RESULT]], ptr align 16 [[ARRAY_P]], i64 16, i1 false)
    AlignedArray result = a.aArray;
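  }
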
  // CHECK-LABEL: @_ZN5test11bERNS_1BE
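  void b(B &b) {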
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load ptr, ptr
    // CHECK: [[VPTR:%.*]] = load ptr, ptr [[B_P]], align 8
    // CHECK: [[T0:%.*]] = getelementptr i8, ptr [[VPTR]], i64 -24
    // CHECK: [[OFFSET:%.*]] = load i64, ptr [[T0]], align 8
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, ptr [[B_P]], i64 [[OFFSET]]
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], ptr [[T1]], i32 0, i32 0
    // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 64 [[RESULT]], ptr align 16 [[ARRAY_P]], i64 16, i1 false)
    AlignedArray result = b.aArray;
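  }
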
  // CHECK-LABEL: @_ZN5test11cERNS_1BE
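  void c(B &b) {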
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load ptr, ptr
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B:%.*]], ptr [[B_P]], i32 0, i32 2
    // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 64 [[RESULT]], ptr align 8 [[ARRAY_P]], i64 16, i1 false)
    AlignedArray result = b.bArray;
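  }
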
  // CHECK-LABEL: @_ZN5test11dEPNS_1BE
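  void d(B *b) {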
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load ptr, ptr
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B:%.*]], ptr [[B_P]], i32 0, i32 2
    // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 64 [[RESULT]], ptr align 8 [[ARRAY_P]], i64 16, i1 false)
    AlignedArray result = b->bArray;
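  }
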
  // CHECK-LABEL: @_ZN5test11eEv
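  void e() {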
    // CHECK: [[B_P:%.*]] = alloca [[B:%.*]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B:%.*]], ptr [[B_P]], i32 0, i32 2
    // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 64 [[RESULT]], ptr align 16 [[ARRAY_P]], i64 16, i1 false)
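    B b;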
    AlignedArray result = b.bArray;
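  }
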
  // CHECK-LABEL: @_ZN5test11fEv
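  void f() {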
    // TODO: we should devirtualize this derived-to-base conversion.
    // CHECK: [[D_P:%.*]] = alloca [[D:%.*]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[VPTR:%.*]] = load ptr, ptr [[D_P]], align 16
    // CHECK: [[T0:%.*]] = getelementptr i8, ptr [[VPTR]], i64 -24
    // CHECK: [[OFFSET:%.*]] = load i64, ptr [[T0]], align 8
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, ptr [[D_P]], i64 [[OFFSET]]
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], ptr [[T1]], i32 0, i32 0
    // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 64 [[RESULT]], ptr align 16 [[ARRAY_P]], i64 16, i1 false)
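    D d;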
    AlignedArray result = d.aArray;
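  }
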
  // CHECK-LABEL: @_ZN5test11gEv
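  void g() {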
    // CHECK: [[D_P:%.*]] = alloca [[D]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, ptr [[D_P]], i64 24
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B:%.*]], ptr [[T1]], i32 0, i32 2
    // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 64 [[RESULT]], ptr align 8 [[ARRAY_P]], i64 16, i1 false)
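    D d;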
    AlignedArray result = d.bArray;
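  }
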
  // CHECK-LABEL: @_ZN5test11hEPA_NS_1BE
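  void h(B (*b)[]) {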
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load ptr, ptr
    // CHECK: [[ELEMENT_P:%.*]] = getelementptr inbounds [0 x [[B]]], ptr [[B_P]], i64 0
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], ptr [[ELEMENT_P]], i32 0, i32 2
    // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 64 [[RESULT]], ptr align 16 [[ARRAY_P]], i64 16, i1 false)
    AlignedArray result = (*b)->bArray;
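  }
}
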
// CHECK-LABEL: @_Z22incomplete_array_derefPA_i
// CHECK: load i32, ptr {{%.*}}, align 4
int incomplete_array_deref(int (*p)[]) { return (*p)[2]; }