// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s \
// RUN:   | FileCheck -check-prefix=CHECK-X86-64 %s
// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -emit-llvm -o - %s \
// RUN:   | FileCheck -check-prefix=CHECK-PPC64 %s
//
// Tests for bitfield access patterns in C++ with special attention to
// conformance to C++11 memory model requirements.

namespace N0 {
  // Test basic bitfield layout access across interesting byte and word
  // boundaries on both little-endian and big-endian platforms.
  //
  // Each field width below matches the mask or shift amount verified in the
  // CHECK lines that follow (e.g. 16383 == (1 << 14) - 1 for the 14-bit b00).
  struct __attribute__((packed)) S {
    unsigned b00 : 14;
    unsigned b01 : 2;
    unsigned b20 : 6;
    unsigned b21 : 2;
    unsigned b30 : 30;
    unsigned b31 : 2;
    unsigned b70 : 6;
    unsigned b71 : 2;
  };
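
  // Illustrative only: the packed fields above total exactly 64 bits, which is
  // why every accessor below is checked against a single i64 load.
  static_assert(sizeof(S) == 8, "bitfields should pack into one 64-bit unit");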

  unsigned read00(S* s) {
    // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N06read00
    // CHECK-X86-64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-X86-64: %[[and:.*]] = and i64 %[[val]], 16383
    // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
    // CHECK-X86-64: ret i32 %[[trunc]]
    // CHECK-PPC64-LABEL: define{{.*}} zeroext i32 @_ZN2N06read00
    // CHECK-PPC64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 50
    // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[shr]] to i32
    // CHECK-PPC64: ret i32 %[[trunc]]
    return s->b00;
  }

  unsigned read01(S* s) {
    // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N06read01
    // CHECK-X86-64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 14
    // CHECK-X86-64: %[[and:.*]] = and i64 %[[shr]], 3
    // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
    // CHECK-X86-64: ret i32 %[[trunc]]
    // CHECK-PPC64-LABEL: define{{.*}} zeroext i32 @_ZN2N06read01
    // CHECK-PPC64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 48
    // CHECK-PPC64: %[[and:.*]] = and i64 %[[shr]], 3
    // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
    // CHECK-PPC64: ret i32 %[[trunc]]
    return s->b01;
  }

  unsigned read20(S* s) {
    // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N06read20
    // CHECK-X86-64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 16
    // CHECK-X86-64: %[[and:.*]] = and i64 %[[shr]], 63
    // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
    // CHECK-X86-64: ret i32 %[[trunc]]
    // CHECK-PPC64-LABEL: define{{.*}} zeroext i32 @_ZN2N06read20
    // CHECK-PPC64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 42
    // CHECK-PPC64: %[[and:.*]] = and i64 %[[shr]], 63
    // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
    // CHECK-PPC64: ret i32 %[[trunc]]
    return s->b20;
  }

  unsigned read21(S* s) {
    // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N06read21
    // CHECK-X86-64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 22
    // CHECK-X86-64: %[[and:.*]] = and i64 %[[shr]], 3
    // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
    // CHECK-X86-64: ret i32 %[[trunc]]
    // CHECK-PPC64-LABEL: define{{.*}} zeroext i32 @_ZN2N06read21
    // CHECK-PPC64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 40
    // CHECK-PPC64: %[[and:.*]] = and i64 %[[shr]], 3
    // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
    // CHECK-PPC64: ret i32 %[[trunc]]
    return s->b21;
  }

  unsigned read30(S* s) {
    // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N06read30
    // CHECK-X86-64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 24
    // CHECK-X86-64: %[[and:.*]] = and i64 %[[shr]], 1073741823
    // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
    // CHECK-X86-64: ret i32 %[[trunc]]
    // CHECK-PPC64-LABEL: define{{.*}} zeroext i32 @_ZN2N06read30
    // CHECK-PPC64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 10
    // CHECK-PPC64: %[[and:.*]] = and i64 %[[shr]], 1073741823
    // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
    // CHECK-PPC64: ret i32 %[[trunc]]
    return s->b30;
  }

  unsigned read31(S* s) {
    // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N06read31
    // CHECK-X86-64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 54
    // CHECK-X86-64: %[[and:.*]] = and i64 %[[shr]], 3
    // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
    // CHECK-X86-64: ret i32 %[[trunc]]
    // CHECK-PPC64-LABEL: define{{.*}} zeroext i32 @_ZN2N06read31
    // CHECK-PPC64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 8
    // CHECK-PPC64: %[[and:.*]] = and i64 %[[shr]], 3
    // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
    // CHECK-PPC64: ret i32 %[[trunc]]
    return s->b31;
  }

  unsigned read70(S* s) {
    // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N06read70
    // CHECK-X86-64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 56
    // CHECK-X86-64: %[[and:.*]] = and i64 %[[shr]], 63
    // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
    // CHECK-X86-64: ret i32 %[[trunc]]
    // CHECK-PPC64-LABEL: define{{.*}} zeroext i32 @_ZN2N06read70
    // CHECK-PPC64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 2
    // CHECK-PPC64: %[[and:.*]] = and i64 %[[shr]], 63
    // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
    // CHECK-PPC64: ret i32 %[[trunc]]
    return s->b70;
  }

  unsigned read71(S* s) {
    // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N06read71
    // CHECK-X86-64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 62
    // CHECK-X86-64: %[[trunc:.*]] = trunc i64 %[[shr]] to i32
    // CHECK-X86-64: ret i32 %[[trunc]]
    // CHECK-PPC64-LABEL: define{{.*}} zeroext i32 @_ZN2N06read71
    // CHECK-PPC64: %[[val:.*]] = load i64, ptr %{{.*}}
    // CHECK-PPC64: %[[and:.*]] = and i64 %[[val]], 3
    // CHECK-PPC64: %[[trunc:.*]] = trunc i64 %[[and]] to i32
    // CHECK-PPC64: ret i32 %[[trunc]]
    return s->b71;
  }
}

namespace N1 {
  // Ensure that neither loads nor stores to bitfields are widened into
  // surrounding memory locations. (PR13691)
  //
  // NOTE: We could potentially widen loads based on their alignment if we are
  // comfortable requiring that subsequent memory locations within the
  // alignment-widened load are not volatile.
  struct S {
    char a;
    unsigned b : 1;  // A one-bit field with a distinct memory location on
                     // either side of it.
    char c;
  };

  unsigned read(S* s) {
    // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N14read
    // CHECK-X86-64: %[[ptr:.*]] = getelementptr inbounds %{{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-X86-64: %[[val:.*]] = load i8, ptr %[[ptr]]
    // CHECK-X86-64: %[[and:.*]] = and i8 %[[val]], 1
    // CHECK-X86-64: %[[ext:.*]] = zext i8 %[[and]] to i32
    // CHECK-X86-64: ret i32 %[[ext]]
    // CHECK-PPC64-LABEL: define{{.*}} zeroext i32 @_ZN2N14read
    // CHECK-PPC64: %[[ptr:.*]] = getelementptr inbounds %{{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-PPC64: %[[val:.*]] = load i8, ptr %[[ptr]]
    // CHECK-PPC64: %[[shr:.*]] = lshr i8 %[[val]], 7
    // CHECK-PPC64: %[[ext:.*]] = zext i8 %[[shr]] to i32
    // CHECK-PPC64: ret i32 %[[ext]]
    return s->b;
  }

  void write(S* s, unsigned x) {
    // CHECK-X86-64-LABEL: define{{.*}} void @_ZN2N15write
    // CHECK-X86-64: %[[ptr:.*]] = getelementptr inbounds %{{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-X86-64: %[[x_trunc:.*]] = trunc i32 %{{.*}} to i8
    // CHECK-X86-64: %[[old:.*]] = load i8, ptr %[[ptr]]
    // CHECK-X86-64: %[[x_and:.*]] = and i8 %[[x_trunc]], 1
    // CHECK-X86-64: %[[old_and:.*]] = and i8 %[[old]], -2
    // CHECK-X86-64: %[[new:.*]] = or i8 %[[old_and]], %[[x_and]]
    // CHECK-X86-64: store i8 %[[new]], ptr %[[ptr]]
    // CHECK-PPC64-LABEL: define{{.*}} void @_ZN2N15write
    // CHECK-PPC64: %[[ptr:.*]] = getelementptr inbounds %{{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-PPC64: %[[x_trunc:.*]] = trunc i32 %{{.*}} to i8
    // CHECK-PPC64: %[[old:.*]] = load i8, ptr %[[ptr]]
    // CHECK-PPC64: %[[x_and:.*]] = and i8 %[[x_trunc]], 1
    // CHECK-PPC64: %[[x_shl:.*]] = shl i8 %[[x_and]], 7
    // CHECK-PPC64: %[[old_and:.*]] = and i8 %[[old]], 127
    // CHECK-PPC64: %[[new:.*]] = or i8 %[[old_and]], %[[x_shl]]
    // CHECK-PPC64: store i8 %[[new]], ptr %[[ptr]]
    s->b = x;
  }
}

namespace N2 {
  // Do widen loads and stores to bitfields when those bitfields have padding
  // within the struct following them.
  struct S {
    unsigned b : 24;
    // Any trailing member with at least 4-byte alignment (a pointer here)
    // leaves padding after the 24-bit field for the access to widen into.
    void *p;
  };

  unsigned read(S* s) {
    // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N24read
    // CHECK-X86-64: %[[val:.*]] = load i32, ptr %{{.*}}
    // CHECK-X86-64: %[[and:.*]] = and i32 %[[val]], 16777215
    // CHECK-X86-64: ret i32 %[[and]]
    // CHECK-PPC64-LABEL: define{{.*}} zeroext i32 @_ZN2N24read
    // CHECK-PPC64: %[[val:.*]] = load i32, ptr %{{.*}}
    // CHECK-PPC64: %[[shr:.*]] = lshr i32 %[[val]], 8
    // CHECK-PPC64: ret i32 %[[shr]]
    return s->b;
  }

  void write(S* s, unsigned x) {
    // CHECK-X86-64-LABEL: define{{.*}} void @_ZN2N25write
    // CHECK-X86-64: %[[ptr:.*]] = load ptr, ptr %{{.*}}
    // CHECK-X86-64: %[[old:.*]] = load i32, ptr %[[ptr]]
    // CHECK-X86-64: %[[x_and:.*]] = and i32 %{{.*}}, 16777215
    // CHECK-X86-64: %[[old_and:.*]] = and i32 %[[old]], -16777216
    // CHECK-X86-64: %[[new:.*]] = or i32 %[[old_and]], %[[x_and]]
    // CHECK-X86-64: store i32 %[[new]], ptr %{{.*}}
    // CHECK-PPC64-LABEL: define{{.*}} void @_ZN2N25write
    // CHECK-PPC64: %[[ptr:.*]] = load ptr, ptr %{{.*}}
    // CHECK-PPC64: %[[old:.*]] = load i32, ptr %[[ptr]]
    // CHECK-PPC64: %[[x_and:.*]] = and i32 %{{.*}}, 16777215
    // CHECK-PPC64: %[[x_shl:.*]] = shl i32 %[[x_and]], 8
    // CHECK-PPC64: %[[old_and:.*]] = and i32 %[[old]], 255
    // CHECK-PPC64: %[[new:.*]] = or i32 %[[old_and]], %[[x_shl]]
    // CHECK-PPC64: store i32 %[[new]], ptr %{{.*}}
    s->b = x;
  }
}

namespace N3 {
  // Do widen loads and stores to bitfields through the trailing padding at the
  // end of a struct.
  struct S {
    unsigned b : 24;
  };
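
  // Illustrative only: the struct is four bytes, so the widened 32-bit access
  // checked below stays within the object and merely covers the byte of
  // trailing padding.
  static_assert(sizeof(S) == 4, "24-bit field plus one byte of tail padding");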

  unsigned read(S* s) {
    // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N34read
    // CHECK-X86-64: %[[val:.*]] = load i32, ptr %{{.*}}
    // CHECK-X86-64: %[[and:.*]] = and i32 %[[val]], 16777215
    // CHECK-X86-64: ret i32 %[[and]]
    // CHECK-PPC64-LABEL: define{{.*}} zeroext i32 @_ZN2N34read
    // CHECK-PPC64: %[[val:.*]] = load i32, ptr %{{.*}}
    // CHECK-PPC64: %[[shr:.*]] = lshr i32 %[[val]], 8
    // CHECK-PPC64: ret i32 %[[shr]]
    return s->b;
  }

  void write(S* s, unsigned x) {
    // CHECK-X86-64-LABEL: define{{.*}} void @_ZN2N35write
    // CHECK-X86-64: %[[ptr:.*]] = load ptr, ptr %{{.*}}
    // CHECK-X86-64: %[[old:.*]] = load i32, ptr %[[ptr]]
    // CHECK-X86-64: %[[x_and:.*]] = and i32 %{{.*}}, 16777215
    // CHECK-X86-64: %[[old_and:.*]] = and i32 %[[old]], -16777216
    // CHECK-X86-64: %[[new:.*]] = or i32 %[[old_and]], %[[x_and]]
    // CHECK-X86-64: store i32 %[[new]], ptr %{{.*}}
    // CHECK-PPC64-LABEL: define{{.*}} void @_ZN2N35write
    // CHECK-PPC64: %[[ptr:.*]] = load ptr, ptr %{{.*}}
    // CHECK-PPC64: %[[old:.*]] = load i32, ptr %[[ptr]]
    // CHECK-PPC64: %[[x_and:.*]] = and i32 %{{.*}}, 16777215
    // CHECK-PPC64: %[[x_shl:.*]] = shl i32 %[[x_and]], 8
    // CHECK-PPC64: %[[old_and:.*]] = and i32 %[[old]], 255
    // CHECK-PPC64: %[[new:.*]] = or i32 %[[old_and]], %[[x_shl]]
    // CHECK-PPC64: store i32 %[[new]], ptr %{{.*}}
    s->b = x;
  }
}

namespace N4 {
  // Do NOT widen loads and stores to bitfields into padding at the end of
  // a class which might end up with members inside of it when inside a derived
  // class.
  struct Base {
    // The virtual destructor makes Base a dynamic, non-POD class, so a derived
    // class may place its own members in Base's tail padding.
    virtual ~Base() {}

    unsigned b : 24;
  };
  // Imagine some other translation unit introduces:
#if 0
  struct Derived : public Base {
    char c;
  };
#endif

  unsigned read(Base* s) {
    // FIXME: We should widen this load as long as the function isn't being
    // instrumented by ThreadSanitizer.
    //
    // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N44read
    // CHECK-X86-64: %[[gep:.*]] = getelementptr inbounds {{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-X86-64: %[[val:.*]] = load i24, ptr %[[gep]]
    // CHECK-X86-64: %[[ext:.*]] = zext i24 %[[val]] to i32
    // CHECK-X86-64: ret i32 %[[ext]]
    // CHECK-PPC64-LABEL: define{{.*}} zeroext i32 @_ZN2N44read
    // CHECK-PPC64: %[[gep:.*]] = getelementptr inbounds {{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-PPC64: %[[val:.*]] = load i24, ptr %[[gep]]
    // CHECK-PPC64: %[[ext:.*]] = zext i24 %[[val]] to i32
    // CHECK-PPC64: ret i32 %[[ext]]
    return s->b;
  }

  void write(Base* s, unsigned x) {
    // CHECK-X86-64-LABEL: define{{.*}} void @_ZN2N45write
    // CHECK-X86-64: %[[gep:.*]] = getelementptr inbounds {{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-X86-64: %[[new:.*]] = trunc i32 %{{.*}} to i24
    // CHECK-X86-64: store i24 %[[new]], ptr %[[gep]]
    // CHECK-PPC64-LABEL: define{{.*}} void @_ZN2N45write
    // CHECK-PPC64: %[[gep:.*]] = getelementptr inbounds {{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-PPC64: %[[new:.*]] = trunc i32 %{{.*}} to i24
    // CHECK-PPC64: store i24 %[[new]], ptr %[[gep]]
    s->b = x;
  }
}

namespace N5 {
  // Widen through padding at the end of a struct even if that struct
  // participates in a union with another struct which has a separate field in
  // that location. The reasoning is that if the operation is storing to that
  // member of the union, it must be the active member, and thus we can write
  // through the padding. If it is a load, it might be a load of a common
  // prefix through a non-active member, but in such a case the extra bits
  // loaded are masked off anyway.
  union U {
    struct X { unsigned b : 24; char c; } x;
    struct Y { unsigned b : 24; } y;
  };
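
  // Illustrative only: both union members are four bytes, so y's trailing
  // padding byte overlaps x.c exactly as the comment above describes.
  static_assert(sizeof(U) == 4, "y's trailing padding overlaps x.c");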

  unsigned read(U* u) {
    // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N54read
    // CHECK-X86-64: %[[val:.*]] = load i32, ptr %{{.*}}
    // CHECK-X86-64: %[[and:.*]] = and i32 %[[val]], 16777215
    // CHECK-X86-64: ret i32 %[[and]]
    // CHECK-PPC64-LABEL: define{{.*}} zeroext i32 @_ZN2N54read
    // CHECK-PPC64: %[[val:.*]] = load i32, ptr %{{.*}}
    // CHECK-PPC64: %[[shr:.*]] = lshr i32 %[[val]], 8
    // CHECK-PPC64: ret i32 %[[shr]]
    return u->y.b;
  }

  void write(U* u, unsigned x) {
    // CHECK-X86-64-LABEL: define{{.*}} void @_ZN2N55write
    // CHECK-X86-64: %[[ptr:.*]] = load ptr, ptr %{{.*}}
    // CHECK-X86-64: %[[old:.*]] = load i32, ptr %[[ptr]]
    // CHECK-X86-64: %[[x_and:.*]] = and i32 %{{.*}}, 16777215
    // CHECK-X86-64: %[[old_and:.*]] = and i32 %[[old]], -16777216
    // CHECK-X86-64: %[[new:.*]] = or i32 %[[old_and]], %[[x_and]]
    // CHECK-X86-64: store i32 %[[new]], ptr %{{.*}}
    // CHECK-PPC64-LABEL: define{{.*}} void @_ZN2N55write
    // CHECK-PPC64: %[[ptr:.*]] = load ptr, ptr %{{.*}}
    // CHECK-PPC64: %[[old:.*]] = load i32, ptr %[[ptr]]
    // CHECK-PPC64: %[[x_and:.*]] = and i32 %{{.*}}, 16777215
    // CHECK-PPC64: %[[x_shl:.*]] = shl i32 %[[x_and]], 8
    // CHECK-PPC64: %[[old_and:.*]] = and i32 %[[old]], 255
    // CHECK-PPC64: %[[new:.*]] = or i32 %[[old_and]], %[[x_shl]]
    // CHECK-PPC64: store i32 %[[new]], ptr %{{.*}}
    u->y.b = x;
  }
}

namespace N6 {
  // Zero-length bitfields partition the memory locations of bitfields for the
  // purposes of the memory model. That means stores must not span zero-length
  // bitfields and loads may only span them when we are not instrumenting with
  // ThreadSanitizer.
  // FIXME: We currently don't widen loads even without ThreadSanitizer, even
  // though we could.
  struct S {
    unsigned b1 : 24;
    unsigned char : 0;
    unsigned char b2 : 8;
  };
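
  // Illustrative only: the zero-length bitfield adds no storage (the struct is
  // still four bytes); it only splits b1 and b2 into separate memory
  // locations, which is why the CHECK lines below expect an i24 access and a
  // separate i8 access rather than one widened i32.
  static_assert(sizeof(S) == 4, "the zero-length bitfield adds no storage");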

  unsigned read(S* s) {
    // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N64read
    // CHECK-X86-64: %[[val1:.*]] = load i24, ptr %{{.*}}
    // CHECK-X86-64: %[[ext1:.*]] = zext i24 %[[val1]] to i32
    // CHECK-X86-64: %[[ptr2:.*]] = getelementptr inbounds {{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-X86-64: %[[val2:.*]] = load i8, ptr %[[ptr2]]
    // CHECK-X86-64: %[[ext2:.*]] = zext i8 %[[val2]] to i32
    // CHECK-X86-64: %[[add:.*]] = add nsw i32 %[[ext1]], %[[ext2]]
    // CHECK-X86-64: ret i32 %[[add]]
    // CHECK-PPC64-LABEL: define{{.*}} zeroext i32 @_ZN2N64read
    // CHECK-PPC64: %[[val1:.*]] = load i24, ptr %{{.*}}
    // CHECK-PPC64: %[[ext1:.*]] = zext i24 %[[val1]] to i32
    // CHECK-PPC64: %[[ptr2:.*]] = getelementptr inbounds {{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-PPC64: %[[val2:.*]] = load i8, ptr %[[ptr2]]
    // CHECK-PPC64: %[[ext2:.*]] = zext i8 %[[val2]] to i32
    // CHECK-PPC64: %[[add:.*]] = add nsw i32 %[[ext1]], %[[ext2]]
    // CHECK-PPC64: ret i32 %[[add]]
    return s->b1 + s->b2;
  }

  void write(S* s, unsigned x) {
    // CHECK-X86-64-LABEL: define{{.*}} void @_ZN2N65write
    // CHECK-X86-64: %[[new1:.*]] = trunc i32 %{{.*}} to i24
    // CHECK-X86-64: store i24 %[[new1]], ptr %{{.*}}
    // CHECK-X86-64: %[[new2:.*]] = trunc i32 %{{.*}} to i8
    // CHECK-X86-64: %[[ptr2:.*]] = getelementptr inbounds {{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-X86-64: store i8 %[[new2]], ptr %[[ptr2]]
    // CHECK-PPC64-LABEL: define{{.*}} void @_ZN2N65write
    // CHECK-PPC64: %[[new1:.*]] = trunc i32 %{{.*}} to i24
    // CHECK-PPC64: store i24 %[[new1]], ptr %{{.*}}
    // CHECK-PPC64: %[[new2:.*]] = trunc i32 %{{.*}} to i8
    // CHECK-PPC64: %[[ptr2:.*]] = getelementptr inbounds {{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-PPC64: store i8 %[[new2]], ptr %[[ptr2]]
    s->b1 = x;
    s->b2 = x;
  }
}

namespace N7 {
  // Similar to N4 except that this adds a virtual base to the picture. (PR18430)
  // Do NOT widen loads and stores to bitfields into padding at the end of
  // a class which might end up with members inside of it when inside a derived
  // class.
  //
  // B1 only needs to be a dynamic class here so that B2 carries a vtable
  // pointer ahead of its own bitfield.
  struct B1 {
    virtual void f();
    unsigned b1 : 24;
  };
  struct B2 : virtual B1 {
    virtual ~B2();
    unsigned b : 24;
  };
  // Imagine some other translation unit introduces:
#if 0
  struct Derived : public B2 {
    char c;
  };
#endif

  unsigned read(B2* s) {
    // FIXME: We should widen this load as long as the function isn't being
    // instrumented by ThreadSanitizer.
    //
    // CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N74read
    // CHECK-X86-64: %[[gep:.*]] = getelementptr inbounds {{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-X86-64: %[[val:.*]] = load i24, ptr %[[gep]]
    // CHECK-X86-64: %[[ext:.*]] = zext i24 %[[val]] to i32
    // CHECK-X86-64: ret i32 %[[ext]]
    // CHECK-PPC64-LABEL: define{{.*}} zeroext i32 @_ZN2N74read
    // CHECK-PPC64: %[[gep:.*]] = getelementptr inbounds {{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-PPC64: %[[val:.*]] = load i24, ptr %[[gep]]
    // CHECK-PPC64: %[[ext:.*]] = zext i24 %[[val]] to i32
    // CHECK-PPC64: ret i32 %[[ext]]
    return s->b;
  }

  void write(B2* s, unsigned x) {
    // CHECK-X86-64-LABEL: define{{.*}} void @_ZN2N75write
    // CHECK-X86-64: %[[gep:.*]] = getelementptr inbounds {{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-X86-64: %[[new:.*]] = trunc i32 %{{.*}} to i24
    // CHECK-X86-64: store i24 %[[new]], ptr %[[gep]]
    // CHECK-PPC64-LABEL: define{{.*}} void @_ZN2N75write
    // CHECK-PPC64: %[[gep:.*]] = getelementptr inbounds {{.*}}, ptr %{{.*}}, i32 0, i32 1
    // CHECK-PPC64: %[[new:.*]] = trunc i32 %{{.*}} to i24
    // CHECK-PPC64: store i24 %[[new]], ptr %[[gep]]
    s->b = x;
  }
}