clang/test/CodeGen/aapcs-bitfield.c
1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // RUN: %clang_cc1 -triple armv8-none-linux-eabi -fno-aapcs-bitfield-width -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s -check-prefix=LE
3 // RUN: %clang_cc1 -triple armebv8-none-linux-eabi -fno-aapcs-bitfield-width -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s -check-prefix=BE
5 // RUN: %clang_cc1 -triple armv8-none-linux-eabi -faapcs-bitfield-load -fno-aapcs-bitfield-width -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s -check-prefixes=LENUMLOADS
6 // RUN: %clang_cc1 -triple armebv8-none-linux-eabi -faapcs-bitfield-load -fno-aapcs-bitfield-width -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s -check-prefixes=BENUMLOADS
8 // RUN: %clang_cc1 -triple armv8-none-linux-eabi -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s -check-prefix=LEWIDTH
9 // RUN: %clang_cc1 -triple armebv8-none-linux-eabi -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s -check-prefix=BEWIDTH
11 // RUN: %clang_cc1 -triple armv8-none-linux-eabi -faapcs-bitfield-load -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s -check-prefixes=LEWIDTHNUM
12 // RUN: %clang_cc1 -triple armebv8-none-linux-eabi -faapcs-bitfield-load -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s -check-prefixes=BEWIDTHNUM
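// The check prefixes pair the target endianness (LE = armv8, BE = armebv8) with the
// bit-field options in the RUN lines above: LE/BE use -fno-aapcs-bitfield-width alone,
// *NUMLOADS adds -faapcs-bitfield-load, *WIDTH drops -fno-aapcs-bitfield-width so AAPCS
// access widths apply to volatile bit-fields, and *WIDTHNUM combines both.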
14 struct st0 {
15 short c : 7;
16 };
18 // LE-LABEL: @st0_check_load(
19 // LE-NEXT: entry:
20 // LE-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
21 // LE-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
22 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
23 // LE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
24 // LE-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
25 // LE-NEXT: ret i32 [[CONV]]
27 // BE-LABEL: @st0_check_load(
28 // BE-NEXT: entry:
29 // BE-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
30 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
31 // BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
32 // BE-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
33 // BE-NEXT: ret i32 [[CONV]]
35 // LENUMLOADS-LABEL: @st0_check_load(
36 // LENUMLOADS-NEXT: entry:
37 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
38 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
39 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
40 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
41 // LENUMLOADS-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
42 // LENUMLOADS-NEXT: ret i32 [[CONV]]
44 // BENUMLOADS-LABEL: @st0_check_load(
45 // BENUMLOADS-NEXT: entry:
46 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
47 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
48 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
49 // BENUMLOADS-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
50 // BENUMLOADS-NEXT: ret i32 [[CONV]]
52 // LEWIDTH-LABEL: @st0_check_load(
53 // LEWIDTH-NEXT: entry:
54 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
55 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
56 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
57 // LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
58 // LEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
59 // LEWIDTH-NEXT: ret i32 [[CONV]]
61 // BEWIDTH-LABEL: @st0_check_load(
62 // BEWIDTH-NEXT: entry:
63 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
64 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
65 // BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
66 // BEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
67 // BEWIDTH-NEXT: ret i32 [[CONV]]
69 // LEWIDTHNUM-LABEL: @st0_check_load(
70 // LEWIDTHNUM-NEXT: entry:
71 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
72 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
73 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
74 // LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
75 // LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
76 // LEWIDTHNUM-NEXT: ret i32 [[CONV]]
78 // BEWIDTHNUM-LABEL: @st0_check_load(
79 // BEWIDTHNUM-NEXT: entry:
80 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
81 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
82 // BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
83 // BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
84 // BEWIDTHNUM-NEXT: ret i32 [[CONV]]
86 int st0_check_load(struct st0 *m) {
87 return m->c;
88 }
90 // LE-LABEL: @st0_check_store(
91 // LE-NEXT: entry:
92 // LE-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
93 // LE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
94 // LE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
95 // LE-NEXT: store i8 [[BF_SET]], ptr [[M]], align 2
96 // LE-NEXT: ret void
98 // BE-LABEL: @st0_check_store(
99 // BE-NEXT: entry:
100 // BE-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
101 // BE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
102 // BE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
103 // BE-NEXT: store i8 [[BF_SET]], ptr [[M]], align 2
104 // BE-NEXT: ret void
106 // LENUMLOADS-LABEL: @st0_check_store(
107 // LENUMLOADS-NEXT: entry:
108 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
109 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
110 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
111 // LENUMLOADS-NEXT: store i8 [[BF_SET]], ptr [[M]], align 2
112 // LENUMLOADS-NEXT: ret void
114 // BENUMLOADS-LABEL: @st0_check_store(
115 // BENUMLOADS-NEXT: entry:
116 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
117 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
118 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
119 // BENUMLOADS-NEXT: store i8 [[BF_SET]], ptr [[M]], align 2
120 // BENUMLOADS-NEXT: ret void
122 // LEWIDTH-LABEL: @st0_check_store(
123 // LEWIDTH-NEXT: entry:
124 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
125 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
126 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
127 // LEWIDTH-NEXT: store i8 [[BF_SET]], ptr [[M]], align 2
128 // LEWIDTH-NEXT: ret void
130 // BEWIDTH-LABEL: @st0_check_store(
131 // BEWIDTH-NEXT: entry:
132 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
133 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
134 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
135 // BEWIDTH-NEXT: store i8 [[BF_SET]], ptr [[M]], align 2
136 // BEWIDTH-NEXT: ret void
138 // LEWIDTHNUM-LABEL: @st0_check_store(
139 // LEWIDTHNUM-NEXT: entry:
140 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
141 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
142 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
143 // LEWIDTHNUM-NEXT: store i8 [[BF_SET]], ptr [[M]], align 2
144 // LEWIDTHNUM-NEXT: ret void
146 // BEWIDTHNUM-LABEL: @st0_check_store(
147 // BEWIDTHNUM-NEXT: entry:
148 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[M:%.*]], align 2
149 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
150 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
151 // BEWIDTHNUM-NEXT: store i8 [[BF_SET]], ptr [[M]], align 2
152 // BEWIDTHNUM-NEXT: ret void
154 void st0_check_store(struct st0 *m) {
155 m->c = 1;
156 }
158 struct st1 {
159 int a : 10;
160 short c : 6;
161 };
163 // LE-LABEL: @st1_check_load(
164 // LE-NEXT: entry:
165 // LE-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
166 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 10
167 // LE-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
168 // LE-NEXT: ret i32 [[CONV]]
170 // BE-LABEL: @st1_check_load(
171 // BE-NEXT: entry:
172 // BE-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
173 // BE-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 10
174 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 10
175 // BE-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
176 // BE-NEXT: ret i32 [[CONV]]
178 // LENUMLOADS-LABEL: @st1_check_load(
179 // LENUMLOADS-NEXT: entry:
180 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
181 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 10
182 // LENUMLOADS-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
183 // LENUMLOADS-NEXT: ret i32 [[CONV]]
185 // BENUMLOADS-LABEL: @st1_check_load(
186 // BENUMLOADS-NEXT: entry:
187 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
188 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 10
189 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 10
190 // BENUMLOADS-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
191 // BENUMLOADS-NEXT: ret i32 [[CONV]]
193 // LEWIDTH-LABEL: @st1_check_load(
194 // LEWIDTH-NEXT: entry:
195 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
196 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 10
197 // LEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
198 // LEWIDTH-NEXT: ret i32 [[CONV]]
200 // BEWIDTH-LABEL: @st1_check_load(
201 // BEWIDTH-NEXT: entry:
202 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
203 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 10
204 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 10
205 // BEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
206 // BEWIDTH-NEXT: ret i32 [[CONV]]
208 // LEWIDTHNUM-LABEL: @st1_check_load(
209 // LEWIDTHNUM-NEXT: entry:
210 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
211 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 10
212 // LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
213 // LEWIDTHNUM-NEXT: ret i32 [[CONV]]
215 // BEWIDTHNUM-LABEL: @st1_check_load(
216 // BEWIDTHNUM-NEXT: entry:
217 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
218 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 10
219 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 10
220 // BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
221 // BEWIDTHNUM-NEXT: ret i32 [[CONV]]
223 int st1_check_load(struct st1 *m) {
224 return m->c;
225 }
227 // LE-LABEL: @st1_check_store(
228 // LE-NEXT: entry:
229 // LE-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
230 // LE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 1023
231 // LE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1024
232 // LE-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
233 // LE-NEXT: ret void
235 // BE-LABEL: @st1_check_store(
236 // BE-NEXT: entry:
237 // BE-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
238 // BE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -64
239 // BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
240 // BE-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
241 // BE-NEXT: ret void
243 // LENUMLOADS-LABEL: @st1_check_store(
244 // LENUMLOADS-NEXT: entry:
245 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
246 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 1023
247 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1024
248 // LENUMLOADS-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
249 // LENUMLOADS-NEXT: ret void
251 // BENUMLOADS-LABEL: @st1_check_store(
252 // BENUMLOADS-NEXT: entry:
253 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
254 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -64
255 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
256 // BENUMLOADS-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
257 // BENUMLOADS-NEXT: ret void
259 // LEWIDTH-LABEL: @st1_check_store(
260 // LEWIDTH-NEXT: entry:
261 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
262 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 1023
263 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1024
264 // LEWIDTH-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
265 // LEWIDTH-NEXT: ret void
267 // BEWIDTH-LABEL: @st1_check_store(
268 // BEWIDTH-NEXT: entry:
269 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
270 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -64
271 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
272 // BEWIDTH-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
273 // BEWIDTH-NEXT: ret void
275 // LEWIDTHNUM-LABEL: @st1_check_store(
276 // LEWIDTHNUM-NEXT: entry:
277 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
278 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 1023
279 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1024
280 // LEWIDTHNUM-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
281 // LEWIDTHNUM-NEXT: ret void
283 // BEWIDTHNUM-LABEL: @st1_check_store(
284 // BEWIDTHNUM-NEXT: entry:
285 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
286 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -64
287 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
288 // BEWIDTHNUM-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
289 // BEWIDTHNUM-NEXT: ret void
291 void st1_check_store(struct st1 *m) {
292 m->c = 1;
293 }
295 struct st2 {
296 int a : 10;
297 short c : 7;
298 };
300 // LE-LABEL: @st2_check_load(
301 // LE-NEXT: entry:
302 // LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
303 // LE-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
304 // LE-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
305 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
306 // LE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
307 // LE-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
308 // LE-NEXT: ret i32 [[CONV]]
310 // BE-LABEL: @st2_check_load(
311 // BE-NEXT: entry:
312 // BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
313 // BE-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
314 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
315 // BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
316 // BE-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
317 // BE-NEXT: ret i32 [[CONV]]
319 // LENUMLOADS-LABEL: @st2_check_load(
320 // LENUMLOADS-NEXT: entry:
321 // LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
322 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
323 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
324 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
325 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
326 // LENUMLOADS-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
327 // LENUMLOADS-NEXT: ret i32 [[CONV]]
329 // BENUMLOADS-LABEL: @st2_check_load(
330 // BENUMLOADS-NEXT: entry:
331 // BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
332 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
333 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
334 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
335 // BENUMLOADS-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
336 // BENUMLOADS-NEXT: ret i32 [[CONV]]
338 // LEWIDTH-LABEL: @st2_check_load(
339 // LEWIDTH-NEXT: entry:
340 // LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
341 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
342 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
343 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
344 // LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
345 // LEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
346 // LEWIDTH-NEXT: ret i32 [[CONV]]
348 // BEWIDTH-LABEL: @st2_check_load(
349 // BEWIDTH-NEXT: entry:
350 // BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
351 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
352 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
353 // BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
354 // BEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
355 // BEWIDTH-NEXT: ret i32 [[CONV]]
357 // LEWIDTHNUM-LABEL: @st2_check_load(
358 // LEWIDTHNUM-NEXT: entry:
359 // LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
360 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
361 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
362 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
363 // LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
364 // LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
365 // LEWIDTHNUM-NEXT: ret i32 [[CONV]]
367 // BEWIDTHNUM-LABEL: @st2_check_load(
368 // BEWIDTHNUM-NEXT: entry:
369 // BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
370 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
371 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
372 // BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
373 // BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
374 // BEWIDTHNUM-NEXT: ret i32 [[CONV]]
376 int st2_check_load(struct st2 *m) {
377 return m->c;
378 }
380 // LE-LABEL: @st2_check_store(
381 // LE-NEXT: entry:
382 // LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
383 // LE-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
384 // LE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
385 // LE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
386 // LE-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
387 // LE-NEXT: ret void
389 // BE-LABEL: @st2_check_store(
390 // BE-NEXT: entry:
391 // BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
392 // BE-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
393 // BE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
394 // BE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
395 // BE-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
396 // BE-NEXT: ret void
398 // LENUMLOADS-LABEL: @st2_check_store(
399 // LENUMLOADS-NEXT: entry:
400 // LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
401 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
402 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
403 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
404 // LENUMLOADS-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
405 // LENUMLOADS-NEXT: ret void
407 // BENUMLOADS-LABEL: @st2_check_store(
408 // BENUMLOADS-NEXT: entry:
409 // BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
410 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
411 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
412 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
413 // BENUMLOADS-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
414 // BENUMLOADS-NEXT: ret void
416 // LEWIDTH-LABEL: @st2_check_store(
417 // LEWIDTH-NEXT: entry:
418 // LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
419 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
420 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
421 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
422 // LEWIDTH-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
423 // LEWIDTH-NEXT: ret void
425 // BEWIDTH-LABEL: @st2_check_store(
426 // BEWIDTH-NEXT: entry:
427 // BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
428 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
429 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
430 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
431 // BEWIDTH-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
432 // BEWIDTH-NEXT: ret void
434 // LEWIDTHNUM-LABEL: @st2_check_store(
435 // LEWIDTHNUM-NEXT: entry:
436 // LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
437 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
438 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
439 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
440 // LEWIDTHNUM-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
441 // LEWIDTHNUM-NEXT: ret void
443 // BEWIDTHNUM-LABEL: @st2_check_store(
444 // BEWIDTHNUM-NEXT: entry:
445 // BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
446 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
447 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
448 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
449 // BEWIDTHNUM-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
450 // BEWIDTHNUM-NEXT: ret void
452 void st2_check_store(struct st2 *m) {
453 m->c = 1;
454 }
455 // Volatile access is allowed to use 16 bits
456 struct st3 {
457 volatile short c : 7;
458 };
460 // LE-LABEL: @st3_check_load(
461 // LE-NEXT: entry:
462 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 2
463 // LE-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
464 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
465 // LE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
466 // LE-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
467 // LE-NEXT: ret i32 [[CONV]]
469 // BE-LABEL: @st3_check_load(
470 // BE-NEXT: entry:
471 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 2
472 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
473 // BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
474 // BE-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
475 // BE-NEXT: ret i32 [[CONV]]
477 // LENUMLOADS-LABEL: @st3_check_load(
478 // LENUMLOADS-NEXT: entry:
479 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 2
480 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
481 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
482 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
483 // LENUMLOADS-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
484 // LENUMLOADS-NEXT: ret i32 [[CONV]]
486 // BENUMLOADS-LABEL: @st3_check_load(
487 // BENUMLOADS-NEXT: entry:
488 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 2
489 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
490 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
491 // BENUMLOADS-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
492 // BENUMLOADS-NEXT: ret i32 [[CONV]]
494 // LEWIDTH-LABEL: @st3_check_load(
495 // LEWIDTH-NEXT: entry:
496 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 2
497 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 9
498 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 9
499 // LEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
500 // LEWIDTH-NEXT: ret i32 [[CONV]]
502 // BEWIDTH-LABEL: @st3_check_load(
503 // BEWIDTH-NEXT: entry:
504 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 2
505 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 9
506 // BEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
507 // BEWIDTH-NEXT: ret i32 [[CONV]]
509 // LEWIDTHNUM-LABEL: @st3_check_load(
510 // LEWIDTHNUM-NEXT: entry:
511 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 2
512 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 9
513 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 9
514 // LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
515 // LEWIDTHNUM-NEXT: ret i32 [[CONV]]
517 // BEWIDTHNUM-LABEL: @st3_check_load(
518 // BEWIDTHNUM-NEXT: entry:
519 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 2
520 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 9
521 // BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
522 // BEWIDTHNUM-NEXT: ret i32 [[CONV]]
524 int st3_check_load(struct st3 *m) {
525 return m->c;
526 }
528 // LE-LABEL: @st3_check_store(
529 // LE-NEXT: entry:
530 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 2
531 // LE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
532 // LE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
533 // LE-NEXT: store volatile i8 [[BF_SET]], ptr [[M]], align 2
534 // LE-NEXT: ret void
536 // BE-LABEL: @st3_check_store(
537 // BE-NEXT: entry:
538 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 2
539 // BE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
540 // BE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
541 // BE-NEXT: store volatile i8 [[BF_SET]], ptr [[M]], align 2
542 // BE-NEXT: ret void
544 // LENUMLOADS-LABEL: @st3_check_store(
545 // LENUMLOADS-NEXT: entry:
546 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 2
547 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
548 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
549 // LENUMLOADS-NEXT: store volatile i8 [[BF_SET]], ptr [[M]], align 2
550 // LENUMLOADS-NEXT: ret void
552 // BENUMLOADS-LABEL: @st3_check_store(
553 // BENUMLOADS-NEXT: entry:
554 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 2
555 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
556 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
557 // BENUMLOADS-NEXT: store volatile i8 [[BF_SET]], ptr [[M]], align 2
558 // BENUMLOADS-NEXT: ret void
560 // LEWIDTH-LABEL: @st3_check_store(
561 // LEWIDTH-NEXT: entry:
562 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 2
563 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -128
564 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
565 // LEWIDTH-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 2
566 // LEWIDTH-NEXT: ret void
568 // BEWIDTH-LABEL: @st3_check_store(
569 // BEWIDTH-NEXT: entry:
570 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 2
571 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 511
572 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 512
573 // BEWIDTH-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 2
574 // BEWIDTH-NEXT: ret void
576 // LEWIDTHNUM-LABEL: @st3_check_store(
577 // LEWIDTHNUM-NEXT: entry:
578 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 2
579 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -128
580 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
581 // LEWIDTHNUM-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 2
582 // LEWIDTHNUM-NEXT: ret void
584 // BEWIDTHNUM-LABEL: @st3_check_store(
585 // BEWIDTHNUM-NEXT: entry:
586 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 2
587 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 511
588 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 512
589 // BEWIDTHNUM-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 2
590 // BEWIDTHNUM-NEXT: ret void
592 void st3_check_store(struct st3 *m) {
593 m->c = 1;
594 }
595 // Volatile access to st4.c should use a char ld/st
596 struct st4 {
597 int b : 9;
598 volatile char c : 5;
599 };
601 // LE-LABEL: @st4_check_load(
602 // LE-NEXT: entry:
603 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
604 // LE-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 2
605 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 11
606 // LE-NEXT: [[BF_CAST:%.*]] = trunc i16 [[BF_ASHR]] to i8
607 // LE-NEXT: [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
608 // LE-NEXT: ret i32 [[CONV]]
610 // BE-LABEL: @st4_check_load(
611 // BE-NEXT: entry:
612 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
613 // BE-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 9
614 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 11
615 // BE-NEXT: [[BF_CAST:%.*]] = trunc i16 [[BF_ASHR]] to i8
616 // BE-NEXT: [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
617 // BE-NEXT: ret i32 [[CONV]]
619 // LENUMLOADS-LABEL: @st4_check_load(
620 // LENUMLOADS-NEXT: entry:
621 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
622 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 2
623 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 11
624 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i16 [[BF_ASHR]] to i8
625 // LENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
626 // LENUMLOADS-NEXT: ret i32 [[CONV]]
628 // BENUMLOADS-LABEL: @st4_check_load(
629 // BENUMLOADS-NEXT: entry:
630 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
631 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 9
632 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 11
633 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i16 [[BF_ASHR]] to i8
634 // BENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
635 // BENUMLOADS-NEXT: ret i32 [[CONV]]
637 // LEWIDTH-LABEL: @st4_check_load(
638 // LEWIDTH-NEXT: entry:
639 // LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
640 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
641 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 2
642 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
643 // LEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
644 // LEWIDTH-NEXT: ret i32 [[CONV]]
646 // BEWIDTH-LABEL: @st4_check_load(
647 // BEWIDTH-NEXT: entry:
648 // BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
649 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
650 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
651 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
652 // BEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
653 // BEWIDTH-NEXT: ret i32 [[CONV]]
655 // LEWIDTHNUM-LABEL: @st4_check_load(
656 // LEWIDTHNUM-NEXT: entry:
657 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
658 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
659 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 2
660 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
661 // LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
662 // LEWIDTHNUM-NEXT: ret i32 [[CONV]]
664 // BEWIDTHNUM-LABEL: @st4_check_load(
665 // BEWIDTHNUM-NEXT: entry:
666 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
667 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
668 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
669 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
670 // BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
671 // BEWIDTHNUM-NEXT: ret i32 [[CONV]]
673 int st4_check_load(struct st4 *m) {
674 return m->c;
675 }
677 // LE-LABEL: @st4_check_store(
678 // LE-NEXT: entry:
679 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
680 // LE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -15873
681 // LE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 512
682 // LE-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 4
683 // LE-NEXT: ret void
685 // BE-LABEL: @st4_check_store(
686 // BE-NEXT: entry:
687 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
688 // BE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -125
689 // BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 4
690 // BE-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 4
691 // BE-NEXT: ret void
693 // LENUMLOADS-LABEL: @st4_check_store(
694 // LENUMLOADS-NEXT: entry:
695 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
696 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -15873
697 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 512
698 // LENUMLOADS-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 4
699 // LENUMLOADS-NEXT: ret void
701 // BENUMLOADS-LABEL: @st4_check_store(
702 // BENUMLOADS-NEXT: entry:
703 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
704 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -125
705 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 4
706 // BENUMLOADS-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 4
707 // BENUMLOADS-NEXT: ret void
709 // LEWIDTH-LABEL: @st4_check_store(
710 // LEWIDTH-NEXT: entry:
711 // LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
712 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
713 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -63
714 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
715 // LEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
716 // LEWIDTH-NEXT: ret void
718 // BEWIDTH-LABEL: @st4_check_store(
719 // BEWIDTH-NEXT: entry:
720 // BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
721 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
722 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -125
723 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 4
724 // BEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
725 // BEWIDTH-NEXT: ret void
727 // LEWIDTHNUM-LABEL: @st4_check_store(
728 // LEWIDTHNUM-NEXT: entry:
729 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
730 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
731 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -63
732 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
733 // LEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
734 // LEWIDTHNUM-NEXT: ret void
736 // BEWIDTHNUM-LABEL: @st4_check_store(
737 // BEWIDTHNUM-NEXT: entry:
738 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
739 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
740 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -125
741 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 4
742 // BEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
743 // BEWIDTHNUM-NEXT: ret void
745 void st4_check_store(struct st4 *m) {
746 m->c = 1;
747 }
749 // LE-LABEL: @st4_check_nonv_store(
750 // LE-NEXT: entry:
751 // LE-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
752 // LE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -512
753 // LE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
754 // LE-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
755 // LE-NEXT: ret void
757 // BE-LABEL: @st4_check_nonv_store(
758 // BE-NEXT: entry:
759 // BE-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
760 // BE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 127
761 // BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
762 // BE-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
763 // BE-NEXT: ret void
765 // LENUMLOADS-LABEL: @st4_check_nonv_store(
766 // LENUMLOADS-NEXT: entry:
767 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
768 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -512
769 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
770 // LENUMLOADS-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
771 // LENUMLOADS-NEXT: ret void
773 // BENUMLOADS-LABEL: @st4_check_nonv_store(
774 // BENUMLOADS-NEXT: entry:
775 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
776 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 127
777 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
778 // BENUMLOADS-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
779 // BENUMLOADS-NEXT: ret void
781 // LEWIDTH-LABEL: @st4_check_nonv_store(
782 // LEWIDTH-NEXT: entry:
783 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
784 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -512
785 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
786 // LEWIDTH-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
787 // LEWIDTH-NEXT: ret void
789 // BEWIDTH-LABEL: @st4_check_nonv_store(
790 // BEWIDTH-NEXT: entry:
791 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
792 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 127
793 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
794 // BEWIDTH-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
795 // BEWIDTH-NEXT: ret void
797 // LEWIDTHNUM-LABEL: @st4_check_nonv_store(
798 // LEWIDTHNUM-NEXT: entry:
799 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
800 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -512
801 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
802 // LEWIDTHNUM-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
803 // LEWIDTHNUM-NEXT: ret void
805 // BEWIDTHNUM-LABEL: @st4_check_nonv_store(
806 // BEWIDTHNUM-NEXT: entry:
807 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
808 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 127
809 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
810 // BEWIDTHNUM-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
811 // BEWIDTHNUM-NEXT: ret void
813 void st4_check_nonv_store(struct st4 *m) {
814 m->b = 1;
815 }
817 struct st5 {
818 int a : 12;
819 volatile char c : 5;
820 };
822 // LE-LABEL: @st5_check_load(
823 // LE-NEXT: entry:
824 // LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
825 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
826 // LE-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
827 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
828 // LE-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
829 // LE-NEXT: ret i32 [[CONV]]
831 // BE-LABEL: @st5_check_load(
832 // BE-NEXT: entry:
833 // BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
834 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
835 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
836 // BE-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
837 // BE-NEXT: ret i32 [[CONV]]
839 // LENUMLOADS-LABEL: @st5_check_load(
840 // LENUMLOADS-NEXT: entry:
841 // LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
842 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
843 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
844 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
845 // LENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
846 // LENUMLOADS-NEXT: ret i32 [[CONV]]
848 // BENUMLOADS-LABEL: @st5_check_load(
849 // BENUMLOADS-NEXT: entry:
850 // BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
851 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
852 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
853 // BENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
854 // BENUMLOADS-NEXT: ret i32 [[CONV]]
856 // LEWIDTH-LABEL: @st5_check_load(
857 // LEWIDTH-NEXT: entry:
858 // LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
859 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
860 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
861 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
862 // LEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
863 // LEWIDTH-NEXT: ret i32 [[CONV]]
865 // BEWIDTH-LABEL: @st5_check_load(
866 // BEWIDTH-NEXT: entry:
867 // BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
868 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
869 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
870 // BEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
871 // BEWIDTH-NEXT: ret i32 [[CONV]]
873 // LEWIDTHNUM-LABEL: @st5_check_load(
874 // LEWIDTHNUM-NEXT: entry:
875 // LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
876 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
877 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
878 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
879 // LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
880 // LEWIDTHNUM-NEXT: ret i32 [[CONV]]
882 // BEWIDTHNUM-LABEL: @st5_check_load(
883 // BEWIDTHNUM-NEXT: entry:
884 // BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
885 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
886 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
887 // BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
888 // BEWIDTHNUM-NEXT: ret i32 [[CONV]]
890 int st5_check_load(struct st5 *m) {
891 return m->c;
892 }
894 // LE-LABEL: @st5_check_store(
895 // LE-NEXT: entry:
896 // LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
897 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
898 // LE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
899 // LE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
900 // LE-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
901 // LE-NEXT: ret void
903 // BE-LABEL: @st5_check_store(
904 // BE-NEXT: entry:
905 // BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
906 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
907 // BE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
908 // BE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
909 // BE-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
910 // BE-NEXT: ret void
912 // LENUMLOADS-LABEL: @st5_check_store(
913 // LENUMLOADS-NEXT: entry:
914 // LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
915 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
916 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
917 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
918 // LENUMLOADS-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
919 // LENUMLOADS-NEXT: ret void
921 // BENUMLOADS-LABEL: @st5_check_store(
922 // BENUMLOADS-NEXT: entry:
923 // BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
924 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
925 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
926 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
927 // BENUMLOADS-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
928 // BENUMLOADS-NEXT: ret void
930 // LEWIDTH-LABEL: @st5_check_store(
931 // LEWIDTH-NEXT: entry:
932 // LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
933 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
934 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
935 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
936 // LEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
937 // LEWIDTH-NEXT: ret void
939 // BEWIDTH-LABEL: @st5_check_store(
940 // BEWIDTH-NEXT: entry:
941 // BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
942 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
943 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
944 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
945 // BEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
946 // BEWIDTH-NEXT: ret void
948 // LEWIDTHNUM-LABEL: @st5_check_store(
949 // LEWIDTHNUM-NEXT: entry:
950 // LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
951 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
952 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
953 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
954 // LEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
955 // LEWIDTHNUM-NEXT: ret void
957 // BEWIDTHNUM-LABEL: @st5_check_store(
958 // BEWIDTHNUM-NEXT: entry:
959 // BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
960 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
961 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
962 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
963 // BEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
964 // BEWIDTHNUM-NEXT: ret void
966 void st5_check_store(struct st5 *m) {
967 m->c = 1;
968 }
970 struct st6 {
971 int a : 12;
972 char b;
973 int c : 5;
974 };
976 // LE-LABEL: @st6_check_load(
977 // LE-NEXT: entry:
978 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
979 // LE-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 4
980 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
981 // LE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
982 // LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
983 // LE-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
984 // LE-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
985 // LE-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
986 // LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
987 // LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
988 // LE-NEXT: [[BF_SHL2:%.*]] = shl i8 [[BF_LOAD1]], 3
989 // LE-NEXT: [[BF_ASHR3:%.*]] = ashr i8 [[BF_SHL2]], 3
990 // LE-NEXT: [[BF_CAST4:%.*]] = sext i8 [[BF_ASHR3]] to i32
991 // LE-NEXT: [[ADD5:%.*]] = add nsw i32 [[ADD]], [[BF_CAST4]]
992 // LE-NEXT: ret i32 [[ADD5]]
994 // BE-LABEL: @st6_check_load(
995 // BE-NEXT: entry:
996 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
997 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
998 // BE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
999 // BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
1000 // BE-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
1001 // BE-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
1002 // BE-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
1003 // BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
1004 // BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
1005 // BE-NEXT: [[BF_ASHR2:%.*]] = ashr i8 [[BF_LOAD1]], 3
1006 // BE-NEXT: [[BF_CAST3:%.*]] = sext i8 [[BF_ASHR2]] to i32
1007 // BE-NEXT: [[ADD4:%.*]] = add nsw i32 [[ADD]], [[BF_CAST3]]
1008 // BE-NEXT: ret i32 [[ADD4]]
1010 // LENUMLOADS-LABEL: @st6_check_load(
1011 // LENUMLOADS-NEXT: entry:
1012 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1013 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 4
1014 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
1015 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1016 // LENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
1017 // LENUMLOADS-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
1018 // LENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
1019 // LENUMLOADS-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
1020 // LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
1021 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
1022 // LENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i8 [[BF_LOAD1]], 3
1023 // LENUMLOADS-NEXT: [[BF_ASHR3:%.*]] = ashr i8 [[BF_SHL2]], 3
1024 // LENUMLOADS-NEXT: [[BF_CAST4:%.*]] = sext i8 [[BF_ASHR3]] to i32
1025 // LENUMLOADS-NEXT: [[ADD5:%.*]] = add nsw i32 [[ADD]], [[BF_CAST4]]
1026 // LENUMLOADS-NEXT: ret i32 [[ADD5]]
1028 // BENUMLOADS-LABEL: @st6_check_load(
1029 // BENUMLOADS-NEXT: entry:
1030 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1031 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
1032 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1033 // BENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
1034 // BENUMLOADS-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
1035 // BENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
1036 // BENUMLOADS-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
1037 // BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
1038 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
1039 // BENUMLOADS-NEXT: [[BF_ASHR2:%.*]] = ashr i8 [[BF_LOAD1]], 3
1040 // BENUMLOADS-NEXT: [[BF_CAST3:%.*]] = sext i8 [[BF_ASHR2]] to i32
1041 // BENUMLOADS-NEXT: [[ADD4:%.*]] = add nsw i32 [[ADD]], [[BF_CAST3]]
1042 // BENUMLOADS-NEXT: ret i32 [[ADD4]]
1044 // LEWIDTH-LABEL: @st6_check_load(
1045 // LEWIDTH-NEXT: entry:
1046 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1047 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 4
1048 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
1049 // LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1050 // LEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
1051 // LEWIDTH-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
1052 // LEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
1053 // LEWIDTH-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
1054 // LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
1055 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
1056 // LEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl i8 [[BF_LOAD1]], 3
1057 // LEWIDTH-NEXT: [[BF_ASHR3:%.*]] = ashr i8 [[BF_SHL2]], 3
1058 // LEWIDTH-NEXT: [[BF_CAST4:%.*]] = sext i8 [[BF_ASHR3]] to i32
1059 // LEWIDTH-NEXT: [[ADD5:%.*]] = add nsw i32 [[ADD]], [[BF_CAST4]]
1060 // LEWIDTH-NEXT: ret i32 [[ADD5]]
1062 // BEWIDTH-LABEL: @st6_check_load(
1063 // BEWIDTH-NEXT: entry:
1064 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1065 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
1066 // BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1067 // BEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
1068 // BEWIDTH-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
1069 // BEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
1070 // BEWIDTH-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
1071 // BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
1072 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
1073 // BEWIDTH-NEXT: [[BF_ASHR2:%.*]] = ashr i8 [[BF_LOAD1]], 3
1074 // BEWIDTH-NEXT: [[BF_CAST3:%.*]] = sext i8 [[BF_ASHR2]] to i32
1075 // BEWIDTH-NEXT: [[ADD4:%.*]] = add nsw i32 [[ADD]], [[BF_CAST3]]
1076 // BEWIDTH-NEXT: ret i32 [[ADD4]]
1078 // LEWIDTHNUM-LABEL: @st6_check_load(
1079 // LEWIDTHNUM-NEXT: entry:
1080 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1081 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 4
1082 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
1083 // LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1084 // LEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
1085 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
1086 // LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
1087 // LEWIDTHNUM-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
1088 // LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
1089 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
1090 // LEWIDTHNUM-NEXT: [[BF_SHL2:%.*]] = shl i8 [[BF_LOAD1]], 3
1091 // LEWIDTHNUM-NEXT: [[BF_ASHR3:%.*]] = ashr i8 [[BF_SHL2]], 3
1092 // LEWIDTHNUM-NEXT: [[BF_CAST4:%.*]] = sext i8 [[BF_ASHR3]] to i32
1093 // LEWIDTHNUM-NEXT: [[ADD5:%.*]] = add nsw i32 [[ADD]], [[BF_CAST4]]
1094 // LEWIDTHNUM-NEXT: ret i32 [[ADD5]]
1096 // BEWIDTHNUM-LABEL: @st6_check_load(
1097 // BEWIDTHNUM-NEXT: entry:
1098 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1099 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
1100 // BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1101 // BEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
1102 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
1103 // BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
1104 // BEWIDTHNUM-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
1105 // BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
1106 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
1107 // BEWIDTHNUM-NEXT: [[BF_ASHR2:%.*]] = ashr i8 [[BF_LOAD1]], 3
1108 // BEWIDTHNUM-NEXT: [[BF_CAST3:%.*]] = sext i8 [[BF_ASHR2]] to i32
1109 // BEWIDTHNUM-NEXT: [[ADD4:%.*]] = add nsw i32 [[ADD]], [[BF_CAST3]]
1110 // BEWIDTHNUM-NEXT: ret i32 [[ADD4]]
1112 int st6_check_load(volatile struct st6 *m) {
1113 int x = m->a;
1114 x += m->b;
1115 x += m->c;
1116 return x;
1117 }
1119 // LE-LABEL: @st6_check_store(
1120 // LE-NEXT: entry:
1121 // LE-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
1122 // LE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -4096
1123 // LE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
1124 // LE-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
1125 // LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
1126 // LE-NEXT: store i8 2, ptr [[B]], align 2
1127 // LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
1128 // LE-NEXT: [[BF_LOAD1:%.*]] = load i8, ptr [[C]], align 1
1129 // LE-NEXT: [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], -32
1130 // LE-NEXT: [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 3
1131 // LE-NEXT: store i8 [[BF_SET3]], ptr [[C]], align 1
1132 // LE-NEXT: ret void
1134 // BE-LABEL: @st6_check_store(
1135 // BE-NEXT: entry:
1136 // BE-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
1137 // BE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 15
1138 // BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 16
1139 // BE-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
1140 // BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
1141 // BE-NEXT: store i8 2, ptr [[B]], align 2
1142 // BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
1143 // BE-NEXT: [[BF_LOAD1:%.*]] = load i8, ptr [[C]], align 1
1144 // BE-NEXT: [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], 7
1145 // BE-NEXT: [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 24
1146 // BE-NEXT: store i8 [[BF_SET3]], ptr [[C]], align 1
1147 // BE-NEXT: ret void
1149 // LENUMLOADS-LABEL: @st6_check_store(
1150 // LENUMLOADS-NEXT: entry:
1151 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
1152 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -4096
1153 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
1154 // LENUMLOADS-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
1155 // LENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
1156 // LENUMLOADS-NEXT: store i8 2, ptr [[B]], align 2
1157 // LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
1158 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i8, ptr [[C]], align 1
1159 // LENUMLOADS-NEXT: [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], -32
1160 // LENUMLOADS-NEXT: [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 3
1161 // LENUMLOADS-NEXT: store i8 [[BF_SET3]], ptr [[C]], align 1
1162 // LENUMLOADS-NEXT: ret void
1164 // BENUMLOADS-LABEL: @st6_check_store(
1165 // BENUMLOADS-NEXT: entry:
1166 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
1167 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 15
1168 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 16
1169 // BENUMLOADS-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
1170 // BENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
1171 // BENUMLOADS-NEXT: store i8 2, ptr [[B]], align 2
1172 // BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
1173 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i8, ptr [[C]], align 1
1174 // BENUMLOADS-NEXT: [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], 7
1175 // BENUMLOADS-NEXT: [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 24
1176 // BENUMLOADS-NEXT: store i8 [[BF_SET3]], ptr [[C]], align 1
1177 // BENUMLOADS-NEXT: ret void
1179 // LEWIDTH-LABEL: @st6_check_store(
1180 // LEWIDTH-NEXT: entry:
1181 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
1182 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -4096
1183 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
1184 // LEWIDTH-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
1185 // LEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
1186 // LEWIDTH-NEXT: store i8 2, ptr [[B]], align 2
1187 // LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
1188 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i8, ptr [[C]], align 1
1189 // LEWIDTH-NEXT: [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], -32
1190 // LEWIDTH-NEXT: [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 3
1191 // LEWIDTH-NEXT: store i8 [[BF_SET3]], ptr [[C]], align 1
1192 // LEWIDTH-NEXT: ret void
1194 // BEWIDTH-LABEL: @st6_check_store(
1195 // BEWIDTH-NEXT: entry:
1196 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
1197 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 15
1198 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 16
1199 // BEWIDTH-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
1200 // BEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
1201 // BEWIDTH-NEXT: store i8 2, ptr [[B]], align 2
1202 // BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
1203 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i8, ptr [[C]], align 1
1204 // BEWIDTH-NEXT: [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], 7
1205 // BEWIDTH-NEXT: [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 24
1206 // BEWIDTH-NEXT: store i8 [[BF_SET3]], ptr [[C]], align 1
1207 // BEWIDTH-NEXT: ret void
1209 // LEWIDTHNUM-LABEL: @st6_check_store(
1210 // LEWIDTHNUM-NEXT: entry:
1211 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
1212 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -4096
1213 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
1214 // LEWIDTHNUM-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
1215 // LEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
1216 // LEWIDTHNUM-NEXT: store i8 2, ptr [[B]], align 2
1217 // LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
1218 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i8, ptr [[C]], align 1
1219 // LEWIDTHNUM-NEXT: [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], -32
1220 // LEWIDTHNUM-NEXT: [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 3
1221 // LEWIDTHNUM-NEXT: store i8 [[BF_SET3]], ptr [[C]], align 1
1222 // LEWIDTHNUM-NEXT: ret void
1224 // BEWIDTHNUM-LABEL: @st6_check_store(
1225 // BEWIDTHNUM-NEXT: entry:
1226 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[M:%.*]], align 4
1227 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 15
1228 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 16
1229 // BEWIDTHNUM-NEXT: store i16 [[BF_SET]], ptr [[M]], align 4
1230 // BEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
1231 // BEWIDTHNUM-NEXT: store i8 2, ptr [[B]], align 2
1232 // BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
1233 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i8, ptr [[C]], align 1
1234 // BEWIDTHNUM-NEXT: [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], 7
1235 // BEWIDTHNUM-NEXT: [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 24
1236 // BEWIDTHNUM-NEXT: store i8 [[BF_SET3]], ptr [[C]], align 1
1237 // BEWIDTHNUM-NEXT: ret void
1239 void st6_check_store(struct st6 *m) {
1240 m->a = 1;
1241 m->b = 2;
1242 m->c = 3;
1243 }
1245 // Nested structs and bitfields.
1246 struct st7a {
1247 char a;
1248 int b : 5;
1249 };
1251 struct st7b {
1252 char x;
1253 volatile struct st7a y;
1254 };
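// Only the nested st7a member is volatile-qualified, so the checks below
// expect volatile loads and stores for y.a and y.b but ordinary accesses for x.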
1256 // LE-LABEL: @st7_check_load(
1257 // LE-NEXT: entry:
1258 // LE-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1259 // LE-NEXT: [[TMP0:%.*]] = load i8, ptr [[X]], align 4
1260 // LE-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1261 // LE-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1262 // LE-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1263 // LE-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[A]], align 4
1264 // LE-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1265 // LE-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV1]]
1266 // LE-NEXT: [[Y2:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1267 // LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y2]], i32 0, i32 1
1268 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1269 // LE-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
1270 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
1271 // LE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1272 // LE-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1273 // LE-NEXT: ret i32 [[ADD3]]
1275 // BE-LABEL: @st7_check_load(
1276 // BE-NEXT: entry:
1277 // BE-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1278 // BE-NEXT: [[TMP0:%.*]] = load i8, ptr [[X]], align 4
1279 // BE-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1280 // BE-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1281 // BE-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1282 // BE-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[A]], align 4
1283 // BE-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1284 // BE-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV1]]
1285 // BE-NEXT: [[Y2:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1286 // BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y2]], i32 0, i32 1
1287 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1288 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
1289 // BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1290 // BE-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1291 // BE-NEXT: ret i32 [[ADD3]]
1293 // LENUMLOADS-LABEL: @st7_check_load(
1294 // LENUMLOADS-NEXT: entry:
1295 // LENUMLOADS-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1296 // LENUMLOADS-NEXT: [[TMP0:%.*]] = load i8, ptr [[X]], align 4
1297 // LENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1298 // LENUMLOADS-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1299 // LENUMLOADS-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1300 // LENUMLOADS-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[A]], align 4
1301 // LENUMLOADS-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1302 // LENUMLOADS-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV1]]
1303 // LENUMLOADS-NEXT: [[Y2:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1304 // LENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y2]], i32 0, i32 1
1305 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1306 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
1307 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
1308 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1309 // LENUMLOADS-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1310 // LENUMLOADS-NEXT: ret i32 [[ADD3]]
1312 // BENUMLOADS-LABEL: @st7_check_load(
1313 // BENUMLOADS-NEXT: entry:
1314 // BENUMLOADS-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1315 // BENUMLOADS-NEXT: [[TMP0:%.*]] = load i8, ptr [[X]], align 4
1316 // BENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1317 // BENUMLOADS-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1318 // BENUMLOADS-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1319 // BENUMLOADS-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[A]], align 4
1320 // BENUMLOADS-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1321 // BENUMLOADS-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV1]]
1322 // BENUMLOADS-NEXT: [[Y2:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1323 // BENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y2]], i32 0, i32 1
1324 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1325 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
1326 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1327 // BENUMLOADS-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1328 // BENUMLOADS-NEXT: ret i32 [[ADD3]]
1330 // LEWIDTH-LABEL: @st7_check_load(
1331 // LEWIDTH-NEXT: entry:
1332 // LEWIDTH-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1333 // LEWIDTH-NEXT: [[TMP0:%.*]] = load i8, ptr [[X]], align 4
1334 // LEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1335 // LEWIDTH-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1336 // LEWIDTH-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1337 // LEWIDTH-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[A]], align 4
1338 // LEWIDTH-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1339 // LEWIDTH-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV1]]
1340 // LEWIDTH-NEXT: [[Y2:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1341 // LEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y2]], i32 0, i32 1
1342 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1343 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
1344 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
1345 // LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1346 // LEWIDTH-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1347 // LEWIDTH-NEXT: ret i32 [[ADD3]]
1349 // BEWIDTH-LABEL: @st7_check_load(
1350 // BEWIDTH-NEXT: entry:
1351 // BEWIDTH-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1352 // BEWIDTH-NEXT: [[TMP0:%.*]] = load i8, ptr [[X]], align 4
1353 // BEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1354 // BEWIDTH-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1355 // BEWIDTH-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1356 // BEWIDTH-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[A]], align 4
1357 // BEWIDTH-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1358 // BEWIDTH-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV1]]
1359 // BEWIDTH-NEXT: [[Y2:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1360 // BEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y2]], i32 0, i32 1
1361 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1362 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
1363 // BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1364 // BEWIDTH-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1365 // BEWIDTH-NEXT: ret i32 [[ADD3]]
1367 // LEWIDTHNUM-LABEL: @st7_check_load(
1368 // LEWIDTHNUM-NEXT: entry:
1369 // LEWIDTHNUM-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1370 // LEWIDTHNUM-NEXT: [[TMP0:%.*]] = load i8, ptr [[X]], align 4
1371 // LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1372 // LEWIDTHNUM-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1373 // LEWIDTHNUM-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1374 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[A]], align 4
1375 // LEWIDTHNUM-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1376 // LEWIDTHNUM-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV1]]
1377 // LEWIDTHNUM-NEXT: [[Y2:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1378 // LEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y2]], i32 0, i32 1
1379 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1380 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
1381 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
1382 // LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1383 // LEWIDTHNUM-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1384 // LEWIDTHNUM-NEXT: ret i32 [[ADD3]]
1386 // BEWIDTHNUM-LABEL: @st7_check_load(
1387 // BEWIDTHNUM-NEXT: entry:
1388 // BEWIDTHNUM-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1389 // BEWIDTHNUM-NEXT: [[TMP0:%.*]] = load i8, ptr [[X]], align 4
1390 // BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
1391 // BEWIDTHNUM-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1392 // BEWIDTHNUM-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1393 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[A]], align 4
1394 // BEWIDTHNUM-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
1395 // BEWIDTHNUM-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV1]]
1396 // BEWIDTHNUM-NEXT: [[Y2:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1397 // BEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y2]], i32 0, i32 1
1398 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1399 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
1400 // BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
1401 // BEWIDTHNUM-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
1402 // BEWIDTHNUM-NEXT: ret i32 [[ADD3]]
1404 int st7_check_load(struct st7b *m) {
1405 int r = m->x;
1406 r += m->y.a;
1407 r += m->y.b;
1408 return r;
1409 }
1411 // LE-LABEL: @st7_check_store(
1412 // LE-NEXT: entry:
1413 // LE-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1414 // LE-NEXT: store i8 1, ptr [[X]], align 4
1415 // LE-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1416 // LE-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1417 // LE-NEXT: store volatile i8 2, ptr [[A]], align 4
1418 // LE-NEXT: [[Y1:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1419 // LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y1]], i32 0, i32 1
1420 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1421 // LE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
1422 // LE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 3
1423 // LE-NEXT: store volatile i8 [[BF_SET]], ptr [[B]], align 1
1424 // LE-NEXT: ret void
1426 // BE-LABEL: @st7_check_store(
1427 // BE-NEXT: entry:
1428 // BE-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1429 // BE-NEXT: store i8 1, ptr [[X]], align 4
1430 // BE-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1431 // BE-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1432 // BE-NEXT: store volatile i8 2, ptr [[A]], align 4
1433 // BE-NEXT: [[Y1:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1434 // BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y1]], i32 0, i32 1
1435 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1436 // BE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
1437 // BE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 24
1438 // BE-NEXT: store volatile i8 [[BF_SET]], ptr [[B]], align 1
1439 // BE-NEXT: ret void
1441 // LENUMLOADS-LABEL: @st7_check_store(
1442 // LENUMLOADS-NEXT: entry:
1443 // LENUMLOADS-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1444 // LENUMLOADS-NEXT: store i8 1, ptr [[X]], align 4
1445 // LENUMLOADS-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1446 // LENUMLOADS-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1447 // LENUMLOADS-NEXT: store volatile i8 2, ptr [[A]], align 4
1448 // LENUMLOADS-NEXT: [[Y1:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1449 // LENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y1]], i32 0, i32 1
1450 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1451 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
1452 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 3
1453 // LENUMLOADS-NEXT: store volatile i8 [[BF_SET]], ptr [[B]], align 1
1454 // LENUMLOADS-NEXT: ret void
1456 // BENUMLOADS-LABEL: @st7_check_store(
1457 // BENUMLOADS-NEXT: entry:
1458 // BENUMLOADS-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1459 // BENUMLOADS-NEXT: store i8 1, ptr [[X]], align 4
1460 // BENUMLOADS-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1461 // BENUMLOADS-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1462 // BENUMLOADS-NEXT: store volatile i8 2, ptr [[A]], align 4
1463 // BENUMLOADS-NEXT: [[Y1:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1464 // BENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y1]], i32 0, i32 1
1465 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1466 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
1467 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 24
1468 // BENUMLOADS-NEXT: store volatile i8 [[BF_SET]], ptr [[B]], align 1
1469 // BENUMLOADS-NEXT: ret void
1471 // LEWIDTH-LABEL: @st7_check_store(
1472 // LEWIDTH-NEXT: entry:
1473 // LEWIDTH-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1474 // LEWIDTH-NEXT: store i8 1, ptr [[X]], align 4
1475 // LEWIDTH-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1476 // LEWIDTH-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1477 // LEWIDTH-NEXT: store volatile i8 2, ptr [[A]], align 4
1478 // LEWIDTH-NEXT: [[Y1:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1479 // LEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y1]], i32 0, i32 1
1480 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1481 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
1482 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 3
1483 // LEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[B]], align 1
1484 // LEWIDTH-NEXT: ret void
1486 // BEWIDTH-LABEL: @st7_check_store(
1487 // BEWIDTH-NEXT: entry:
1488 // BEWIDTH-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1489 // BEWIDTH-NEXT: store i8 1, ptr [[X]], align 4
1490 // BEWIDTH-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1491 // BEWIDTH-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1492 // BEWIDTH-NEXT: store volatile i8 2, ptr [[A]], align 4
1493 // BEWIDTH-NEXT: [[Y1:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1494 // BEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y1]], i32 0, i32 1
1495 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1496 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
1497 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 24
1498 // BEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[B]], align 1
1499 // BEWIDTH-NEXT: ret void
1501 // LEWIDTHNUM-LABEL: @st7_check_store(
1502 // LEWIDTHNUM-NEXT: entry:
1503 // LEWIDTHNUM-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1504 // LEWIDTHNUM-NEXT: store i8 1, ptr [[X]], align 4
1505 // LEWIDTHNUM-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1506 // LEWIDTHNUM-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1507 // LEWIDTHNUM-NEXT: store volatile i8 2, ptr [[A]], align 4
1508 // LEWIDTHNUM-NEXT: [[Y1:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1509 // LEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y1]], i32 0, i32 1
1510 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1511 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
1512 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 3
1513 // LEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[B]], align 1
1514 // LEWIDTHNUM-NEXT: ret void
1516 // BEWIDTHNUM-LABEL: @st7_check_store(
1517 // BEWIDTHNUM-NEXT: entry:
1518 // BEWIDTHNUM-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], ptr [[M:%.*]], i32 0, i32 0
1519 // BEWIDTHNUM-NEXT: store i8 1, ptr [[X]], align 4
1520 // BEWIDTHNUM-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1521 // BEWIDTHNUM-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7A:%.*]], ptr [[Y]], i32 0, i32 0
1522 // BEWIDTHNUM-NEXT: store volatile i8 2, ptr [[A]], align 4
1523 // BEWIDTHNUM-NEXT: [[Y1:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], ptr [[M]], i32 0, i32 2
1524 // BEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7A]], ptr [[Y1]], i32 0, i32 1
1525 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
1526 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
1527 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 24
1528 // BEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[B]], align 1
1529 // BEWIDTHNUM-NEXT: ret void
1531 void st7_check_store(struct st7b *m) {
1532 m->x = 1;
1533 m->y.a = 2;
1534 m->y.b = 3;
1535 }
1537 // Check overflowing assignments to bitfields.
1538 struct st8 {
1539 unsigned f : 16;
1540 };
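// 0xffff fills the 16-bit field exactly, so the checks expect the all-ones
// pattern (i16 -1) to be stored and 65535 to be the value of the assignment.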
1542 // LE-LABEL: @st8_check_assignment(
1543 // LE-NEXT: entry:
1544 // LE-NEXT: store i16 -1, ptr [[M:%.*]], align 4
1545 // LE-NEXT: ret i32 65535
1547 // BE-LABEL: @st8_check_assignment(
1548 // BE-NEXT: entry:
1549 // BE-NEXT: store i16 -1, ptr [[M:%.*]], align 4
1550 // BE-NEXT: ret i32 65535
1552 // LENUMLOADS-LABEL: @st8_check_assignment(
1553 // LENUMLOADS-NEXT: entry:
1554 // LENUMLOADS-NEXT: store i16 -1, ptr [[M:%.*]], align 4
1555 // LENUMLOADS-NEXT: ret i32 65535
1557 // BENUMLOADS-LABEL: @st8_check_assignment(
1558 // BENUMLOADS-NEXT: entry:
1559 // BENUMLOADS-NEXT: store i16 -1, ptr [[M:%.*]], align 4
1560 // BENUMLOADS-NEXT: ret i32 65535
1562 // LEWIDTH-LABEL: @st8_check_assignment(
1563 // LEWIDTH-NEXT: entry:
1564 // LEWIDTH-NEXT: store i16 -1, ptr [[M:%.*]], align 4
1565 // LEWIDTH-NEXT: ret i32 65535
1567 // BEWIDTH-LABEL: @st8_check_assignment(
1568 // BEWIDTH-NEXT: entry:
1569 // BEWIDTH-NEXT: store i16 -1, ptr [[M:%.*]], align 4
1570 // BEWIDTH-NEXT: ret i32 65535
1572 // LEWIDTHNUM-LABEL: @st8_check_assignment(
1573 // LEWIDTHNUM-NEXT: entry:
1574 // LEWIDTHNUM-NEXT: store i16 -1, ptr [[M:%.*]], align 4
1575 // LEWIDTHNUM-NEXT: ret i32 65535
1577 // BEWIDTHNUM-LABEL: @st8_check_assignment(
1578 // BEWIDTHNUM-NEXT: entry:
1579 // BEWIDTHNUM-NEXT: store i16 -1, ptr [[M:%.*]], align 4
1580 // BEWIDTHNUM-NEXT: ret i32 65535
1582 int st8_check_assignment(struct st8 *m) {
1583 return m->f = 0xffff;
1584 }
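// Volatile bit-field at the start of its container. The *WIDTH* prefixes
// expect the access to cover the full 4-byte container (i32); the remaining
// prefixes expect an i8 access of just the field. The *NUMLOADS prefixes
// additionally expect a load where a plain store would otherwise suffice.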
1586 struct st9{
1587 int f : 8;
1588 };
1590 // LE-LABEL: @read_st9(
1591 // LE-NEXT: entry:
1592 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
1593 // LE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
1594 // LE-NEXT: ret i32 [[BF_CAST]]
1596 // BE-LABEL: @read_st9(
1597 // BE-NEXT: entry:
1598 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
1599 // BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
1600 // BE-NEXT: ret i32 [[BF_CAST]]
1602 // LENUMLOADS-LABEL: @read_st9(
1603 // LENUMLOADS-NEXT: entry:
1604 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
1605 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
1606 // LENUMLOADS-NEXT: ret i32 [[BF_CAST]]
1608 // BENUMLOADS-LABEL: @read_st9(
1609 // BENUMLOADS-NEXT: entry:
1610 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
1611 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
1612 // BENUMLOADS-NEXT: ret i32 [[BF_CAST]]
1614 // LEWIDTH-LABEL: @read_st9(
1615 // LEWIDTH-NEXT: entry:
1616 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1617 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 24
1618 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
1619 // LEWIDTH-NEXT: ret i32 [[BF_ASHR]]
1621 // BEWIDTH-LABEL: @read_st9(
1622 // BEWIDTH-NEXT: entry:
1623 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1624 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 24
1625 // BEWIDTH-NEXT: ret i32 [[BF_ASHR]]
1627 // LEWIDTHNUM-LABEL: @read_st9(
1628 // LEWIDTHNUM-NEXT: entry:
1629 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1630 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 24
1631 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
1632 // LEWIDTHNUM-NEXT: ret i32 [[BF_ASHR]]
1634 // BEWIDTHNUM-LABEL: @read_st9(
1635 // BEWIDTHNUM-NEXT: entry:
1636 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1637 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 24
1638 // BEWIDTHNUM-NEXT: ret i32 [[BF_ASHR]]
1640 int read_st9(volatile struct st9 *m) {
1641 return m->f;
1642 }
1644 // LE-LABEL: @store_st9(
1645 // LE-NEXT: entry:
1646 // LE-NEXT: store volatile i8 1, ptr [[M:%.*]], align 4
1647 // LE-NEXT: ret void
1649 // BE-LABEL: @store_st9(
1650 // BE-NEXT: entry:
1651 // BE-NEXT: store volatile i8 1, ptr [[M:%.*]], align 4
1652 // BE-NEXT: ret void
1654 // LENUMLOADS-LABEL: @store_st9(
1655 // LENUMLOADS-NEXT: entry:
1656 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
1657 // LENUMLOADS-NEXT: store volatile i8 1, ptr [[M]], align 4
1658 // LENUMLOADS-NEXT: ret void
1660 // BENUMLOADS-LABEL: @store_st9(
1661 // BENUMLOADS-NEXT: entry:
1662 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
1663 // BENUMLOADS-NEXT: store volatile i8 1, ptr [[M]], align 4
1664 // BENUMLOADS-NEXT: ret void
1666 // LEWIDTH-LABEL: @store_st9(
1667 // LEWIDTH-NEXT: entry:
1668 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1669 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -256
1670 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 1
1671 // LEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
1672 // LEWIDTH-NEXT: ret void
1674 // BEWIDTH-LABEL: @store_st9(
1675 // BEWIDTH-NEXT: entry:
1676 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1677 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], 16777215
1678 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 16777216
1679 // BEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
1680 // BEWIDTH-NEXT: ret void
1682 // LEWIDTHNUM-LABEL: @store_st9(
1683 // LEWIDTHNUM-NEXT: entry:
1684 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1685 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -256
1686 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 1
1687 // LEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
1688 // LEWIDTHNUM-NEXT: ret void
1690 // BEWIDTHNUM-LABEL: @store_st9(
1691 // BEWIDTHNUM-NEXT: entry:
1692 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1693 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], 16777215
1694 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 16777216
1695 // BEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
1696 // BEWIDTHNUM-NEXT: ret void
1698 void store_st9(volatile struct st9 *m) {
1699 m->f = 1;
1700 }
1702 // LE-LABEL: @increment_st9(
1703 // LE-NEXT: entry:
1704 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
1705 // LE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
1706 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
1707 // LE-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
1708 // LE-NEXT: store volatile i8 [[TMP1]], ptr [[M]], align 4
1709 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
1710 // LE-NEXT: ret void
1712 // BE-LABEL: @increment_st9(
1713 // BE-NEXT: entry:
1714 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
1715 // BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
1716 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
1717 // BE-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
1718 // BE-NEXT: store volatile i8 [[TMP1]], ptr [[M]], align 4
1719 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
1720 // BE-NEXT: ret void
1722 // LENUMLOADS-LABEL: @increment_st9(
1723 // LENUMLOADS-NEXT: entry:
1724 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
1725 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
1726 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
1727 // LENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
1728 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[M]], align 4
1729 // LENUMLOADS-NEXT: store volatile i8 [[TMP1]], ptr [[M]], align 4
1730 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
1731 // LENUMLOADS-NEXT: ret void
1733 // BENUMLOADS-LABEL: @increment_st9(
1734 // BENUMLOADS-NEXT: entry:
1735 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
1736 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
1737 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
1738 // BENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
1739 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[M]], align 4
1740 // BENUMLOADS-NEXT: store volatile i8 [[TMP1]], ptr [[M]], align 4
1741 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
1742 // BENUMLOADS-NEXT: ret void
1744 // LEWIDTH-LABEL: @increment_st9(
1745 // LEWIDTH-NEXT: entry:
1746 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1747 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 24
1748 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
1749 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
1750 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
1751 // LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
1752 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -256
1753 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
1754 // LEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
1755 // LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
1756 // LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
1757 // LEWIDTH-NEXT: ret void
1759 // BEWIDTH-LABEL: @increment_st9(
1760 // BEWIDTH-NEXT: entry:
1761 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1762 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 24
1763 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
1764 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
1765 // BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
1766 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
1767 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 16777215
1768 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
1769 // BEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
1770 // BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
1771 // BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
1772 // BEWIDTH-NEXT: ret void
1774 // LEWIDTHNUM-LABEL: @increment_st9(
1775 // LEWIDTHNUM-NEXT: entry:
1776 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1777 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 24
1778 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
1779 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
1780 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
1781 // LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
1782 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -256
1783 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
1784 // LEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
1785 // LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
1786 // LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
1787 // LEWIDTHNUM-NEXT: ret void
1789 // BEWIDTHNUM-LABEL: @increment_st9(
1790 // BEWIDTHNUM-NEXT: entry:
1791 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1792 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 24
1793 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
1794 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
1795 // BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
1796 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
1797 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 16777215
1798 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
1799 // BEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
1800 // BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
1801 // BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
1802 // BEWIDTHNUM-NEXT: ret void
1804 void increment_st9(volatile struct st9 *m) {
1805 ++m->f;
1806 }
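// As st9, but f is preceded by a 1-bit field, so reads are expected to shift
// and mask the loaded value to isolate f, and stores to use a
// read-modify-write of the container.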
1808 struct st10{
1809 int e : 1;
1810 int f : 8;
1811 };
1813 // LE-LABEL: @read_st10(
1814 // LE-NEXT: entry:
1815 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1816 // LE-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 7
1817 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
1818 // LE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1819 // LE-NEXT: ret i32 [[BF_CAST]]
1821 // BE-LABEL: @read_st10(
1822 // BE-NEXT: entry:
1823 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1824 // BE-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 1
1825 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
1826 // BE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1827 // BE-NEXT: ret i32 [[BF_CAST]]
1829 // LENUMLOADS-LABEL: @read_st10(
1830 // LENUMLOADS-NEXT: entry:
1831 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1832 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 7
1833 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
1834 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1835 // LENUMLOADS-NEXT: ret i32 [[BF_CAST]]
1837 // BENUMLOADS-LABEL: @read_st10(
1838 // BENUMLOADS-NEXT: entry:
1839 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1840 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 1
1841 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
1842 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1843 // BENUMLOADS-NEXT: ret i32 [[BF_CAST]]
1845 // LEWIDTH-LABEL: @read_st10(
1846 // LEWIDTH-NEXT: entry:
1847 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1848 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 23
1849 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
1850 // LEWIDTH-NEXT: ret i32 [[BF_ASHR]]
1852 // BEWIDTH-LABEL: @read_st10(
1853 // BEWIDTH-NEXT: entry:
1854 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1855 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 1
1856 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
1857 // BEWIDTH-NEXT: ret i32 [[BF_ASHR]]
1859 // LEWIDTHNUM-LABEL: @read_st10(
1860 // LEWIDTHNUM-NEXT: entry:
1861 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1862 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 23
1863 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
1864 // LEWIDTHNUM-NEXT: ret i32 [[BF_ASHR]]
1866 // BEWIDTHNUM-LABEL: @read_st10(
1867 // BEWIDTHNUM-NEXT: entry:
1868 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1869 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 1
1870 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
1871 // BEWIDTHNUM-NEXT: ret i32 [[BF_ASHR]]
1873 int read_st10(volatile struct st10 *m) {
1874 return m->f;
1875 }
1877 // LE-LABEL: @store_st10(
1878 // LE-NEXT: entry:
1879 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1880 // LE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -511
1881 // LE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 2
1882 // LE-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 4
1883 // LE-NEXT: ret void
1885 // BE-LABEL: @store_st10(
1886 // BE-NEXT: entry:
1887 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1888 // BE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -32641
1889 // BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
1890 // BE-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 4
1891 // BE-NEXT: ret void
1893 // LENUMLOADS-LABEL: @store_st10(
1894 // LENUMLOADS-NEXT: entry:
1895 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1896 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -511
1897 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 2
1898 // LENUMLOADS-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 4
1899 // LENUMLOADS-NEXT: ret void
1901 // BENUMLOADS-LABEL: @store_st10(
1902 // BENUMLOADS-NEXT: entry:
1903 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1904 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -32641
1905 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
1906 // BENUMLOADS-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 4
1907 // BENUMLOADS-NEXT: ret void
1909 // LEWIDTH-LABEL: @store_st10(
1910 // LEWIDTH-NEXT: entry:
1911 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1912 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -511
1913 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 2
1914 // LEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
1915 // LEWIDTH-NEXT: ret void
1917 // BEWIDTH-LABEL: @store_st10(
1918 // BEWIDTH-NEXT: entry:
1919 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1920 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -2139095041
1921 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 8388608
1922 // BEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
1923 // BEWIDTH-NEXT: ret void
1925 // LEWIDTHNUM-LABEL: @store_st10(
1926 // LEWIDTHNUM-NEXT: entry:
1927 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1928 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -511
1929 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 2
1930 // LEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
1931 // LEWIDTHNUM-NEXT: ret void
1933 // BEWIDTHNUM-LABEL: @store_st10(
1934 // BEWIDTHNUM-NEXT: entry:
1935 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
1936 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -2139095041
1937 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 8388608
1938 // BEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
1939 // BEWIDTHNUM-NEXT: ret void
1941 void store_st10(volatile struct st10 *m) {
1942 m->f = 1;
1943 }
1945 // LE-LABEL: @increment_st10(
1946 // LE-NEXT: entry:
1947 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1948 // LE-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 7
1949 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
1950 // LE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1951 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
1952 // LE-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i16
1953 // LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
1954 // LE-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
1955 // LE-NEXT: [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 1
1956 // LE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -511
1957 // LE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
1958 // LE-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 4
1959 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i16 [[BF_VALUE]], 8
1960 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i16 [[BF_RESULT_SHL]], 8
1961 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[BF_RESULT_ASHR]] to i32
1962 // LE-NEXT: ret void
1964 // BE-LABEL: @increment_st10(
1965 // BE-NEXT: entry:
1966 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1967 // BE-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 1
1968 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
1969 // BE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1970 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
1971 // BE-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i16
1972 // BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
1973 // BE-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
1974 // BE-NEXT: [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 7
1975 // BE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -32641
1976 // BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
1977 // BE-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 4
1978 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i16 [[BF_VALUE]], 8
1979 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i16 [[BF_RESULT_SHL]], 8
1980 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[BF_RESULT_ASHR]] to i32
1981 // BE-NEXT: ret void
1983 // LENUMLOADS-LABEL: @increment_st10(
1984 // LENUMLOADS-NEXT: entry:
1985 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
1986 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 7
1987 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
1988 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
1989 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
1990 // LENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i16
1991 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
1992 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
1993 // LENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 1
1994 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -511
1995 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
1996 // LENUMLOADS-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 4
1997 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i16 [[BF_VALUE]], 8
1998 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i16 [[BF_RESULT_SHL]], 8
1999 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[BF_RESULT_ASHR]] to i32
2000 // LENUMLOADS-NEXT: ret void
2002 // BENUMLOADS-LABEL: @increment_st10(
2003 // BENUMLOADS-NEXT: entry:
2004 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[M:%.*]], align 4
2005 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 1
2006 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
2007 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
2008 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2009 // BENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i16
2010 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
2011 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
2012 // BENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 7
2013 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -32641
2014 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
2015 // BENUMLOADS-NEXT: store volatile i16 [[BF_SET]], ptr [[M]], align 4
2016 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i16 [[BF_VALUE]], 8
2017 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i16 [[BF_RESULT_SHL]], 8
2018 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[BF_RESULT_ASHR]] to i32
2019 // BENUMLOADS-NEXT: ret void
2021 // LEWIDTH-LABEL: @increment_st10(
2022 // LEWIDTH-NEXT: entry:
2023 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2024 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 23
2025 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
2026 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2027 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2028 // LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2029 // LEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl i32 [[BF_VALUE]], 1
2030 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -511
2031 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2032 // LEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2033 // LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2034 // LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
2035 // LEWIDTH-NEXT: ret void
2037 // BEWIDTH-LABEL: @increment_st10(
2038 // BEWIDTH-NEXT: entry:
2039 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2040 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 1
2041 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
2042 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2043 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2044 // BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2045 // BEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl i32 [[BF_VALUE]], 23
2046 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -2139095041
2047 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2048 // BEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2049 // BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2050 // BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
2051 // BEWIDTH-NEXT: ret void
2053 // LEWIDTHNUM-LABEL: @increment_st10(
2054 // LEWIDTHNUM-NEXT: entry:
2055 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2056 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 23
2057 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
2058 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2059 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2060 // LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2061 // LEWIDTHNUM-NEXT: [[BF_SHL2:%.*]] = shl i32 [[BF_VALUE]], 1
2062 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -511
2063 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2064 // LEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2065 // LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2066 // LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
2067 // LEWIDTHNUM-NEXT: ret void
2069 // BEWIDTHNUM-LABEL: @increment_st10(
2070 // BEWIDTHNUM-NEXT: entry:
2071 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2072 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 1
2073 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
2074 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2075 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2076 // BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2077 // BEWIDTHNUM-NEXT: [[BF_SHL2:%.*]] = shl i32 [[BF_VALUE]], 23
2078 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -2139095041
2079 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2080 // BEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2081 // BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2082 // BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
2083 // BEWIDTHNUM-NEXT: ret void
2085 void increment_st10(volatile struct st10 *m) {
2086 ++m->f;
2087 }
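// Here the 16-bit field sits immediately after the leading char, so every
// prefix expects an i16 access with alignment 1 rather than a widened i32
// access of the declared type.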
2089 struct st11{
2090 char e;
2091 int f : 16;
2092 };
2094 // LE-LABEL: @read_st11(
2095 // LE-NEXT: entry:
2096 // LE-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2097 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2098 // LE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2099 // LE-NEXT: ret i32 [[BF_CAST]]
2101 // BE-LABEL: @read_st11(
2102 // BE-NEXT: entry:
2103 // BE-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2104 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2105 // BE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2106 // BE-NEXT: ret i32 [[BF_CAST]]
2108 // LENUMLOADS-LABEL: @read_st11(
2109 // LENUMLOADS-NEXT: entry:
2110 // LENUMLOADS-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2111 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2112 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2113 // LENUMLOADS-NEXT: ret i32 [[BF_CAST]]
2115 // BENUMLOADS-LABEL: @read_st11(
2116 // BENUMLOADS-NEXT: entry:
2117 // BENUMLOADS-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2118 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2119 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2120 // BENUMLOADS-NEXT: ret i32 [[BF_CAST]]
2122 // LEWIDTH-LABEL: @read_st11(
2123 // LEWIDTH-NEXT: entry:
2124 // LEWIDTH-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2125 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2126 // LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2127 // LEWIDTH-NEXT: ret i32 [[BF_CAST]]
2129 // BEWIDTH-LABEL: @read_st11(
2130 // BEWIDTH-NEXT: entry:
2131 // BEWIDTH-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2132 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2133 // BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2134 // BEWIDTH-NEXT: ret i32 [[BF_CAST]]
2136 // LEWIDTHNUM-LABEL: @read_st11(
2137 // LEWIDTHNUM-NEXT: entry:
2138 // LEWIDTHNUM-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2139 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2140 // LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2141 // LEWIDTHNUM-NEXT: ret i32 [[BF_CAST]]
2143 // BEWIDTHNUM-LABEL: @read_st11(
2144 // BEWIDTHNUM-NEXT: entry:
2145 // BEWIDTHNUM-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2146 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2147 // BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2148 // BEWIDTHNUM-NEXT: ret i32 [[BF_CAST]]
2150 int read_st11(volatile struct st11 *m) {
2151 return m->f;
2152 }
2154 // LE-LABEL: @store_st11(
2155 // LE-NEXT: entry:
2156 // LE-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2157 // LE-NEXT: store volatile i16 1, ptr [[F]], align 1
2158 // LE-NEXT: ret void
2160 // BE-LABEL: @store_st11(
2161 // BE-NEXT: entry:
2162 // BE-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2163 // BE-NEXT: store volatile i16 1, ptr [[F]], align 1
2164 // BE-NEXT: ret void
2166 // LENUMLOADS-LABEL: @store_st11(
2167 // LENUMLOADS-NEXT: entry:
2168 // LENUMLOADS-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2169 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2170 // LENUMLOADS-NEXT: store volatile i16 1, ptr [[F]], align 1
2171 // LENUMLOADS-NEXT: ret void
2173 // BENUMLOADS-LABEL: @store_st11(
2174 // BENUMLOADS-NEXT: entry:
2175 // BENUMLOADS-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2176 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2177 // BENUMLOADS-NEXT: store volatile i16 1, ptr [[F]], align 1
2178 // BENUMLOADS-NEXT: ret void
2180 // LEWIDTH-LABEL: @store_st11(
2181 // LEWIDTH-NEXT: entry:
2182 // LEWIDTH-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2183 // LEWIDTH-NEXT: store volatile i16 1, ptr [[F]], align 1
2184 // LEWIDTH-NEXT: ret void
2186 // BEWIDTH-LABEL: @store_st11(
2187 // BEWIDTH-NEXT: entry:
2188 // BEWIDTH-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2189 // BEWIDTH-NEXT: store volatile i16 1, ptr [[F]], align 1
2190 // BEWIDTH-NEXT: ret void
2192 // LEWIDTHNUM-LABEL: @store_st11(
2193 // LEWIDTHNUM-NEXT: entry:
2194 // LEWIDTHNUM-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2195 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2196 // LEWIDTHNUM-NEXT: store volatile i16 1, ptr [[F]], align 1
2197 // LEWIDTHNUM-NEXT: ret void
2199 // BEWIDTHNUM-LABEL: @store_st11(
2200 // BEWIDTHNUM-NEXT: entry:
2201 // BEWIDTHNUM-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2202 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2203 // BEWIDTHNUM-NEXT: store volatile i16 1, ptr [[F]], align 1
2204 // BEWIDTHNUM-NEXT: ret void
2206 void store_st11(volatile struct st11 *m) {
2207 m->f = 1;
2208 }
2210 // LE-LABEL: @increment_st11(
2211 // LE-NEXT: entry:
2212 // LE-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2213 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2214 // LE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2215 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2216 // LE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
2217 // LE-NEXT: store volatile i16 [[TMP0]], ptr [[F]], align 1
2218 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
2219 // LE-NEXT: ret void
2221 // BE-LABEL: @increment_st11(
2222 // BE-NEXT: entry:
2223 // BE-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2224 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2225 // BE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2226 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2227 // BE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
2228 // BE-NEXT: store volatile i16 [[TMP0]], ptr [[F]], align 1
2229 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
2230 // BE-NEXT: ret void
2232 // LENUMLOADS-LABEL: @increment_st11(
2233 // LENUMLOADS-NEXT: entry:
2234 // LENUMLOADS-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2235 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2236 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2237 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2238 // LENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
2239 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[F]], align 1
2240 // LENUMLOADS-NEXT: store volatile i16 [[TMP0]], ptr [[F]], align 1
2241 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
2242 // LENUMLOADS-NEXT: ret void
2244 // BENUMLOADS-LABEL: @increment_st11(
2245 // BENUMLOADS-NEXT: entry:
2246 // BENUMLOADS-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2247 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2248 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2249 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2250 // BENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
2251 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[F]], align 1
2252 // BENUMLOADS-NEXT: store volatile i16 [[TMP0]], ptr [[F]], align 1
2253 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
2254 // BENUMLOADS-NEXT: ret void
2256 // LEWIDTH-LABEL: @increment_st11(
2257 // LEWIDTH-NEXT: entry:
2258 // LEWIDTH-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2259 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2260 // LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2261 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2262 // LEWIDTH-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
2263 // LEWIDTH-NEXT: store volatile i16 [[TMP0]], ptr [[F]], align 1
2264 // LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
2265 // LEWIDTH-NEXT: ret void
2267 // BEWIDTH-LABEL: @increment_st11(
2268 // BEWIDTH-NEXT: entry:
2269 // BEWIDTH-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2270 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2271 // BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2272 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2273 // BEWIDTH-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
2274 // BEWIDTH-NEXT: store volatile i16 [[TMP0]], ptr [[F]], align 1
2275 // BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
2276 // BEWIDTH-NEXT: ret void
2278 // LEWIDTHNUM-LABEL: @increment_st11(
2279 // LEWIDTHNUM-NEXT: entry:
2280 // LEWIDTHNUM-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2281 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2282 // LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2283 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2284 // LEWIDTHNUM-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
2285 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[F]], align 1
2286 // LEWIDTHNUM-NEXT: store volatile i16 [[TMP0]], ptr [[F]], align 1
2287 // LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
2288 // LEWIDTHNUM-NEXT: ret void
2290 // BEWIDTHNUM-LABEL: @increment_st11(
2291 // BEWIDTHNUM-NEXT: entry:
2292 // BEWIDTHNUM-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 1
2293 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[F]], align 1
2294 // BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
2295 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2296 // BEWIDTHNUM-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
2297 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[F]], align 1
2298 // BEWIDTHNUM-NEXT: store volatile i16 [[TMP0]], ptr [[F]], align 1
2299 // BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
2300 // BEWIDTHNUM-NEXT: ret void
2302 void increment_st11(volatile struct st11 *m) {
2303   ++m->f;
2304 }
2306 // LE-LABEL: @increment_e_st11(
2307 // LE-NEXT: entry:
2308 // LE-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 0
2309 // LE-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[E]], align 4
2310 // LE-NEXT: [[INC:%.*]] = add i8 [[TMP0]], 1
2311 // LE-NEXT: store volatile i8 [[INC]], ptr [[E]], align 4
2312 // LE-NEXT: ret void
2314 // BE-LABEL: @increment_e_st11(
2315 // BE-NEXT: entry:
2316 // BE-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 0
2317 // BE-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[E]], align 4
2318 // BE-NEXT: [[INC:%.*]] = add i8 [[TMP0]], 1
2319 // BE-NEXT: store volatile i8 [[INC]], ptr [[E]], align 4
2320 // BE-NEXT: ret void
2322 // LENUMLOADS-LABEL: @increment_e_st11(
2323 // LENUMLOADS-NEXT: entry:
2324 // LENUMLOADS-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 0
2325 // LENUMLOADS-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[E]], align 4
2326 // LENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[TMP0]], 1
2327 // LENUMLOADS-NEXT: store volatile i8 [[INC]], ptr [[E]], align 4
2328 // LENUMLOADS-NEXT: ret void
2330 // BENUMLOADS-LABEL: @increment_e_st11(
2331 // BENUMLOADS-NEXT: entry:
2332 // BENUMLOADS-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 0
2333 // BENUMLOADS-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[E]], align 4
2334 // BENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[TMP0]], 1
2335 // BENUMLOADS-NEXT: store volatile i8 [[INC]], ptr [[E]], align 4
2336 // BENUMLOADS-NEXT: ret void
2338 // LEWIDTH-LABEL: @increment_e_st11(
2339 // LEWIDTH-NEXT: entry:
2340 // LEWIDTH-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 0
2341 // LEWIDTH-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[E]], align 4
2342 // LEWIDTH-NEXT: [[INC:%.*]] = add i8 [[TMP0]], 1
2343 // LEWIDTH-NEXT: store volatile i8 [[INC]], ptr [[E]], align 4
2344 // LEWIDTH-NEXT: ret void
2346 // BEWIDTH-LABEL: @increment_e_st11(
2347 // BEWIDTH-NEXT: entry:
2348 // BEWIDTH-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 0
2349 // BEWIDTH-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[E]], align 4
2350 // BEWIDTH-NEXT: [[INC:%.*]] = add i8 [[TMP0]], 1
2351 // BEWIDTH-NEXT: store volatile i8 [[INC]], ptr [[E]], align 4
2352 // BEWIDTH-NEXT: ret void
2354 // LEWIDTHNUM-LABEL: @increment_e_st11(
2355 // LEWIDTHNUM-NEXT: entry:
2356 // LEWIDTHNUM-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 0
2357 // LEWIDTHNUM-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[E]], align 4
2358 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add i8 [[TMP0]], 1
2359 // LEWIDTHNUM-NEXT: store volatile i8 [[INC]], ptr [[E]], align 4
2360 // LEWIDTHNUM-NEXT: ret void
2362 // BEWIDTHNUM-LABEL: @increment_e_st11(
2363 // BEWIDTHNUM-NEXT: entry:
2364 // BEWIDTHNUM-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], ptr [[M:%.*]], i32 0, i32 0
2365 // BEWIDTHNUM-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[E]], align 4
2366 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add i8 [[TMP0]], 1
2367 // BEWIDTHNUM-NEXT: store volatile i8 [[INC]], ptr [[E]], align 4
2368 // BEWIDTHNUM-NEXT: ret void
2370 void increment_e_st11(volatile struct st11 *m) {
2371   ++m->e;
2372 }
2374 struct st12{
2375 int e : 8;
2376   int f : 16;
2377 };
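// e and f share one 32-bit storage unit, so the checks below access m->f with
// volatile i32 loads followed by shift/mask sequences rather than narrower accesses.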
2379 // LE-LABEL: @read_st12(
2380 // LE-NEXT: entry:
2381 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2382 // LE-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2383 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2384 // LE-NEXT: ret i32 [[BF_ASHR]]
2386 // BE-LABEL: @read_st12(
2387 // BE-NEXT: entry:
2388 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2389 // BE-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2390 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2391 // BE-NEXT: ret i32 [[BF_ASHR]]
2393 // LENUMLOADS-LABEL: @read_st12(
2394 // LENUMLOADS-NEXT: entry:
2395 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2396 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2397 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2398 // LENUMLOADS-NEXT: ret i32 [[BF_ASHR]]
2400 // BENUMLOADS-LABEL: @read_st12(
2401 // BENUMLOADS-NEXT: entry:
2402 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2403 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2404 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2405 // BENUMLOADS-NEXT: ret i32 [[BF_ASHR]]
2407 // LEWIDTH-LABEL: @read_st12(
2408 // LEWIDTH-NEXT: entry:
2409 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2410 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2411 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2412 // LEWIDTH-NEXT: ret i32 [[BF_ASHR]]
2414 // BEWIDTH-LABEL: @read_st12(
2415 // BEWIDTH-NEXT: entry:
2416 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2417 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2418 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2419 // BEWIDTH-NEXT: ret i32 [[BF_ASHR]]
2421 // LEWIDTHNUM-LABEL: @read_st12(
2422 // LEWIDTHNUM-NEXT: entry:
2423 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2424 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2425 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2426 // LEWIDTHNUM-NEXT: ret i32 [[BF_ASHR]]
2428 // BEWIDTHNUM-LABEL: @read_st12(
2429 // BEWIDTHNUM-NEXT: entry:
2430 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2431 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2432 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2433 // BEWIDTHNUM-NEXT: ret i32 [[BF_ASHR]]
2435 int read_st12(volatile struct st12 *m) {
2436   return m->f;
2437 }
2439 // LE-LABEL: @store_st12(
2440 // LE-NEXT: entry:
2441 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2442 // LE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2443 // LE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2444 // LE-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2445 // LE-NEXT: ret void
2447 // BE-LABEL: @store_st12(
2448 // BE-NEXT: entry:
2449 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2450 // BE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2451 // BE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2452 // BE-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2453 // BE-NEXT: ret void
2455 // LENUMLOADS-LABEL: @store_st12(
2456 // LENUMLOADS-NEXT: entry:
2457 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2458 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2459 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2460 // LENUMLOADS-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2461 // LENUMLOADS-NEXT: ret void
2463 // BENUMLOADS-LABEL: @store_st12(
2464 // BENUMLOADS-NEXT: entry:
2465 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2466 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2467 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2468 // BENUMLOADS-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2469 // BENUMLOADS-NEXT: ret void
2471 // LEWIDTH-LABEL: @store_st12(
2472 // LEWIDTH-NEXT: entry:
2473 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2474 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2475 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2476 // LEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2477 // LEWIDTH-NEXT: ret void
2479 // BEWIDTH-LABEL: @store_st12(
2480 // BEWIDTH-NEXT: entry:
2481 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2482 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2483 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2484 // BEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2485 // BEWIDTH-NEXT: ret void
2487 // LEWIDTHNUM-LABEL: @store_st12(
2488 // LEWIDTHNUM-NEXT: entry:
2489 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2490 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2491 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2492 // LEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2493 // LEWIDTHNUM-NEXT: ret void
2495 // BEWIDTHNUM-LABEL: @store_st12(
2496 // BEWIDTHNUM-NEXT: entry:
2497 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2498 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
2499 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
2500 // BEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2501 // BEWIDTHNUM-NEXT: ret void
2503 void store_st12(volatile struct st12 *m) {
2504   m->f = 1;
2505 }
2507 // LE-LABEL: @increment_st12(
2508 // LE-NEXT: entry:
2509 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2510 // LE-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2511 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2512 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2513 // LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2514 // LE-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
2515 // LE-NEXT: [[BF_SHL2:%.*]] = shl i32 [[BF_VALUE]], 8
2516 // LE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2517 // LE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2518 // LE-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2519 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
2520 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
2521 // LE-NEXT: ret void
2523 // BE-LABEL: @increment_st12(
2524 // BE-NEXT: entry:
2525 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2526 // BE-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2527 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2528 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2529 // BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2530 // BE-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
2531 // BE-NEXT: [[BF_SHL2:%.*]] = shl i32 [[BF_VALUE]], 8
2532 // BE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2533 // BE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2534 // BE-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2535 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
2536 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
2537 // BE-NEXT: ret void
2539 // LENUMLOADS-LABEL: @increment_st12(
2540 // LENUMLOADS-NEXT: entry:
2541 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2542 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2543 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2544 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2545 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2546 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
2547 // LENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i32 [[BF_VALUE]], 8
2548 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2549 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2550 // LENUMLOADS-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2551 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
2552 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
2553 // LENUMLOADS-NEXT: ret void
2555 // BENUMLOADS-LABEL: @increment_st12(
2556 // BENUMLOADS-NEXT: entry:
2557 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2558 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2559 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2560 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2561 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2562 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
2563 // BENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i32 [[BF_VALUE]], 8
2564 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2565 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2566 // BENUMLOADS-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2567 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
2568 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
2569 // BENUMLOADS-NEXT: ret void
2571 // LEWIDTH-LABEL: @increment_st12(
2572 // LEWIDTH-NEXT: entry:
2573 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2574 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2575 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2576 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2577 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2578 // LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
2579 // LEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl i32 [[BF_VALUE]], 8
2580 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2581 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2582 // LEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2583 // LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
2584 // LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
2585 // LEWIDTH-NEXT: ret void
2587 // BEWIDTH-LABEL: @increment_st12(
2588 // BEWIDTH-NEXT: entry:
2589 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2590 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2591 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2592 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2593 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2594 // BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
2595 // BEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl i32 [[BF_VALUE]], 8
2596 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2597 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2598 // BEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2599 // BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
2600 // BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
2601 // BEWIDTH-NEXT: ret void
2603 // LEWIDTHNUM-LABEL: @increment_st12(
2604 // LEWIDTHNUM-NEXT: entry:
2605 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2606 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2607 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2608 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2609 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2610 // LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
2611 // LEWIDTHNUM-NEXT: [[BF_SHL2:%.*]] = shl i32 [[BF_VALUE]], 8
2612 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2613 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2614 // LEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2615 // LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
2616 // LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
2617 // LEWIDTHNUM-NEXT: ret void
2619 // BEWIDTHNUM-LABEL: @increment_st12(
2620 // BEWIDTHNUM-NEXT: entry:
2621 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2622 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
2623 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
2624 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2625 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2626 // BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
2627 // BEWIDTHNUM-NEXT: [[BF_SHL2:%.*]] = shl i32 [[BF_VALUE]], 8
2628 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
2629 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
2630 // BEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2631 // BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
2632 // BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
2633 // BEWIDTHNUM-NEXT: ret void
2635 void increment_st12(volatile struct st12 *m) {
2636   ++m->f;
2637 }
2639 // LE-LABEL: @increment_e_st12(
2640 // LE-NEXT: entry:
2641 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2642 // LE-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 24
2643 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
2644 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2645 // LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2646 // LE-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2647 // LE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -256
2648 // LE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
2649 // LE-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2650 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2651 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
2652 // LE-NEXT: ret void
2654 // BE-LABEL: @increment_e_st12(
2655 // BE-NEXT: entry:
2656 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2657 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 24
2658 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2659 // BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2660 // BE-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2661 // BE-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2662 // BE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 16777215
2663 // BE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
2664 // BE-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2665 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2666 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
2667 // BE-NEXT: ret void
2669 // LENUMLOADS-LABEL: @increment_e_st12(
2670 // LENUMLOADS-NEXT: entry:
2671 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2672 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 24
2673 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
2674 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2675 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2676 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2677 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -256
2678 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
2679 // LENUMLOADS-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2680 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2681 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
2682 // LENUMLOADS-NEXT: ret void
2684 // BENUMLOADS-LABEL: @increment_e_st12(
2685 // BENUMLOADS-NEXT: entry:
2686 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2687 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 24
2688 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2689 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2690 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2691 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2692 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 16777215
2693 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
2694 // BENUMLOADS-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2695 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2696 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
2697 // BENUMLOADS-NEXT: ret void
2699 // LEWIDTH-LABEL: @increment_e_st12(
2700 // LEWIDTH-NEXT: entry:
2701 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2702 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 24
2703 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
2704 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2705 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2706 // LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2707 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -256
2708 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
2709 // LEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2710 // LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2711 // LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
2712 // LEWIDTH-NEXT: ret void
2714 // BEWIDTH-LABEL: @increment_e_st12(
2715 // BEWIDTH-NEXT: entry:
2716 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2717 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 24
2718 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2719 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2720 // BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2721 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2722 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 16777215
2723 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
2724 // BEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2725 // BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2726 // BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
2727 // BEWIDTH-NEXT: ret void
2729 // LEWIDTHNUM-LABEL: @increment_e_st12(
2730 // LEWIDTHNUM-NEXT: entry:
2731 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2732 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 24
2733 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
2734 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2735 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2736 // LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2737 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -256
2738 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
2739 // LEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2740 // LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2741 // LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
2742 // LEWIDTHNUM-NEXT: ret void
2744 // BEWIDTHNUM-LABEL: @increment_e_st12(
2745 // BEWIDTHNUM-NEXT: entry:
2746 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
2747 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 24
2748 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
2749 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[M]], align 4
2750 // BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
2751 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2752 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 16777215
2753 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
2754 // BEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
2755 // BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 24
2756 // BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 24
2757 // BEWIDTHNUM-NEXT: ret void
2759 void increment_e_st12(volatile struct st12 *m) {
2760   ++m->e;
2761 }
2763 struct st13 {
2764 char a : 8;
2765 int b : 32;
2766 } __attribute__((packed));
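// With the packed attribute the 8-bit and 32-bit fields form a 40-bit storage
// unit, so the checks below expect volatile i40 loads and stores at align 1.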
2768 // LE-LABEL: @increment_b_st13(
2769 // LE-NEXT: entry:
2770 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
2771 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
2772 // LE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
2773 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2774 // LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
2775 // LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
2776 // LE-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
2777 // LE-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
2778 // LE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
2779 // LE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
2780 // LE-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
2781 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
2782 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
2783 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
2784 // LE-NEXT: ret void
2786 // BE-LABEL: @increment_b_st13(
2787 // BE-NEXT: entry:
2788 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
2789 // BE-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
2790 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
2791 // BE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
2792 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2793 // BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
2794 // BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
2795 // BE-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
2796 // BE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
2797 // BE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
2798 // BE-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
2799 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
2800 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
2801 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
2802 // BE-NEXT: ret void
2804 // LENUMLOADS-LABEL: @increment_b_st13(
2805 // LENUMLOADS-NEXT: entry:
2806 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
2807 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
2808 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
2809 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2810 // LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
2811 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
2812 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
2813 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
2814 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
2815 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
2816 // LENUMLOADS-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
2817 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
2818 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
2819 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
2820 // LENUMLOADS-NEXT: ret void
2822 // BENUMLOADS-LABEL: @increment_b_st13(
2823 // BENUMLOADS-NEXT: entry:
2824 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
2825 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
2826 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
2827 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
2828 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2829 // BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
2830 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
2831 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
2832 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
2833 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
2834 // BENUMLOADS-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
2835 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
2836 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
2837 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
2838 // BENUMLOADS-NEXT: ret void
2840 // LEWIDTH-LABEL: @increment_b_st13(
2841 // LEWIDTH-NEXT: entry:
2842 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
2843 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
2844 // LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
2845 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2846 // LEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
2847 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
2848 // LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
2849 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
2850 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
2851 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
2852 // LEWIDTH-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
2853 // LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
2854 // LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
2855 // LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
2856 // LEWIDTH-NEXT: ret void
2858 // BEWIDTH-LABEL: @increment_b_st13(
2859 // BEWIDTH-NEXT: entry:
2860 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
2861 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
2862 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
2863 // BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
2864 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2865 // BEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
2866 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
2867 // BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
2868 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
2869 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
2870 // BEWIDTH-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
2871 // BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
2872 // BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
2873 // BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
2874 // BEWIDTH-NEXT: ret void
2876 // LEWIDTHNUM-LABEL: @increment_b_st13(
2877 // LEWIDTHNUM-NEXT: entry:
2878 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
2879 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
2880 // LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
2881 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2882 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
2883 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
2884 // LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
2885 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
2886 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
2887 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
2888 // LEWIDTHNUM-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
2889 // LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
2890 // LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
2891 // LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
2892 // LEWIDTHNUM-NEXT: ret void
2894 // BEWIDTHNUM-LABEL: @increment_b_st13(
2895 // BEWIDTHNUM-NEXT: entry:
2896 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
2897 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
2898 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
2899 // BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
2900 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
2901 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
2902 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
2903 // BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
2904 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
2905 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
2906 // BEWIDTHNUM-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
2907 // BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
2908 // BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
2909 // BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
2910 // BEWIDTHNUM-NEXT: ret void
2912 void increment_b_st13(volatile struct st13 *s) {
2913   s->b++;
2914 }
2916 struct st14 {
2917 char a : 8;
2918 } __attribute__((packed));
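// A single 8-bit field in a packed struct: the checks below expect plain
// volatile i8 loads and stores.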
2920 // LE-LABEL: @increment_a_st14(
2921 // LE-NEXT: entry:
2922 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
2923 // LE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2924 // LE-NEXT: store volatile i8 [[INC]], ptr [[S]], align 1
2925 // LE-NEXT: ret void
2927 // BE-LABEL: @increment_a_st14(
2928 // BE-NEXT: entry:
2929 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
2930 // BE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2931 // BE-NEXT: store volatile i8 [[INC]], ptr [[S]], align 1
2932 // BE-NEXT: ret void
2934 // LENUMLOADS-LABEL: @increment_a_st14(
2935 // LENUMLOADS-NEXT: entry:
2936 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
2937 // LENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2938 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
2939 // LENUMLOADS-NEXT: store volatile i8 [[INC]], ptr [[S]], align 1
2940 // LENUMLOADS-NEXT: ret void
2942 // BENUMLOADS-LABEL: @increment_a_st14(
2943 // BENUMLOADS-NEXT: entry:
2944 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
2945 // BENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2946 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
2947 // BENUMLOADS-NEXT: store volatile i8 [[INC]], ptr [[S]], align 1
2948 // BENUMLOADS-NEXT: ret void
2950 // LEWIDTH-LABEL: @increment_a_st14(
2951 // LEWIDTH-NEXT: entry:
2952 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
2953 // LEWIDTH-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2954 // LEWIDTH-NEXT: store volatile i8 [[INC]], ptr [[S]], align 1
2955 // LEWIDTH-NEXT: ret void
2957 // BEWIDTH-LABEL: @increment_a_st14(
2958 // BEWIDTH-NEXT: entry:
2959 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
2960 // BEWIDTH-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2961 // BEWIDTH-NEXT: store volatile i8 [[INC]], ptr [[S]], align 1
2962 // BEWIDTH-NEXT: ret void
2964 // LEWIDTHNUM-LABEL: @increment_a_st14(
2965 // LEWIDTHNUM-NEXT: entry:
2966 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
2967 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2968 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
2969 // LEWIDTHNUM-NEXT: store volatile i8 [[INC]], ptr [[S]], align 1
2970 // LEWIDTHNUM-NEXT: ret void
2972 // BEWIDTHNUM-LABEL: @increment_a_st14(
2973 // BEWIDTHNUM-NEXT: entry:
2974 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
2975 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
2976 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
2977 // BEWIDTHNUM-NEXT: store volatile i8 [[INC]], ptr [[S]], align 1
2978 // BEWIDTHNUM-NEXT: ret void
2980 void increment_a_st14(volatile struct st14 *s) {
2981   s->a++;
2982 }
2984 struct st15 {
2985 short a : 8;
2986 } __attribute__((packed));
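// Same single byte of storage as st14, but the field is declared short, so the
// checks below sign-extend to i16 for the arithmetic and truncate back to i8.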
2988 // LE-LABEL: @increment_a_st15(
2989 // LE-NEXT: entry:
2990 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
2991 // LE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
2992 // LE-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
2993 // LE-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
2994 // LE-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
2995 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
2996 // LE-NEXT: ret void
2998 // BE-LABEL: @increment_a_st15(
2999 // BE-NEXT: entry:
3000 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
3001 // BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
3002 // BE-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
3003 // BE-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
3004 // BE-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
3005 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
3006 // BE-NEXT: ret void
3008 // LENUMLOADS-LABEL: @increment_a_st15(
3009 // LENUMLOADS-NEXT: entry:
3010 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
3011 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
3012 // LENUMLOADS-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
3013 // LENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
3014 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
3015 // LENUMLOADS-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
3016 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
3017 // LENUMLOADS-NEXT: ret void
3019 // BENUMLOADS-LABEL: @increment_a_st15(
3020 // BENUMLOADS-NEXT: entry:
3021 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
3022 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
3023 // BENUMLOADS-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
3024 // BENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
3025 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
3026 // BENUMLOADS-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
3027 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
3028 // BENUMLOADS-NEXT: ret void
3030 // LEWIDTH-LABEL: @increment_a_st15(
3031 // LEWIDTH-NEXT: entry:
3032 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
3033 // LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
3034 // LEWIDTH-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
3035 // LEWIDTH-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
3036 // LEWIDTH-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
3037 // LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
3038 // LEWIDTH-NEXT: ret void
3040 // BEWIDTH-LABEL: @increment_a_st15(
3041 // BEWIDTH-NEXT: entry:
3042 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
3043 // BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
3044 // BEWIDTH-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
3045 // BEWIDTH-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
3046 // BEWIDTH-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
3047 // BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
3048 // BEWIDTH-NEXT: ret void
3050 // LEWIDTHNUM-LABEL: @increment_a_st15(
3051 // LEWIDTHNUM-NEXT: entry:
3052 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
3053 // LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
3054 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
3055 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
3056 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
3057 // LEWIDTHNUM-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
3058 // LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
3059 // LEWIDTHNUM-NEXT: ret void
3061 // BEWIDTHNUM-LABEL: @increment_a_st15(
3062 // BEWIDTHNUM-NEXT: entry:
3063 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
3064 // BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
3065 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
3066 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
3067 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
3068 // BEWIDTHNUM-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
3069 // BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
3070 // BEWIDTHNUM-NEXT: ret void
3072 void increment_a_st15(volatile struct st15 *s) {
3073   s->a++;
3074 }
3076 struct st16 {
3077 int a : 32;
3078 int b : 16;
3079 int c : 32;
3080   int d : 16;
3081 };
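// Without the packed attribute, a and b are laid out in a 64-bit storage unit,
// so the checks below use i64 loads and stores at 4-byte alignment.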
3083 // LE-LABEL: @increment_a_st16(
3084 // LE-NEXT: entry:
3085 // LE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3086 // LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3087 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
3088 // LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3089 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3090 // LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3091 // LE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3092 // LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3093 // LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
3094 // LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
3095 // LE-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3096 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3097 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3098 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3099 // LE-NEXT: ret void
3101 // BE-LABEL: @increment_a_st16(
3102 // BE-NEXT: entry:
3103 // BE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3104 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
3105 // BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3106 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3107 // BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3108 // BE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3109 // BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3110 // BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3111 // BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
3112 // BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
3113 // BE-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3114 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3115 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3116 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3117 // BE-NEXT: ret void
3119 // LENUMLOADS-LABEL: @increment_a_st16(
3120 // LENUMLOADS-NEXT: entry:
3121 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3122 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3123 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
3124 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3125 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3126 // LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3127 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3128 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3129 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
3130 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
3131 // LENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3132 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3133 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3134 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3135 // LENUMLOADS-NEXT: ret void
3137 // BENUMLOADS-LABEL: @increment_a_st16(
3138 // BENUMLOADS-NEXT: entry:
3139 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3140 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
3141 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3142 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3143 // BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3144 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3145 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3146 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3147 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
3148 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
3149 // BENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3150 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3151 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3152 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3153 // BENUMLOADS-NEXT: ret void
3155 // LEWIDTH-LABEL: @increment_a_st16(
3156 // LEWIDTH-NEXT: entry:
3157 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3158 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3159 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
3160 // LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3161 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3162 // LEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3163 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3164 // LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3165 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
3166 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
3167 // LEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3168 // LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3169 // LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3170 // LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3171 // LEWIDTH-NEXT: ret void
3173 // BEWIDTH-LABEL: @increment_a_st16(
3174 // BEWIDTH-NEXT: entry:
3175 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3176 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
3177 // BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3178 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3179 // BEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3180 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3181 // BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3182 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3183 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
3184 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
3185 // BEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3186 // BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3187 // BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3188 // BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3189 // BEWIDTH-NEXT: ret void
3191 // LEWIDTHNUM-LABEL: @increment_a_st16(
3192 // LEWIDTHNUM-NEXT: entry:
3193 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3194 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3195 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
3196 // LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3197 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3198 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3199 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3200 // LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3201 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
3202 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
3203 // LEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3204 // LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3205 // LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3206 // LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3207 // LEWIDTHNUM-NEXT: ret void
3209 // BEWIDTHNUM-LABEL: @increment_a_st16(
3210 // BEWIDTHNUM-NEXT: entry:
3211 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3212 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
3213 // BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3214 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3215 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3216 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3217 // BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3218 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3219 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
3220 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
3221 // BEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3222 // BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3223 // BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3224 // BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3225 // BEWIDTHNUM-NEXT: ret void
3227 void increment_a_st16(struct st16 *s) {
3228   s->a++;
3229 }
3231 // LE-LABEL: @increment_b_st16(
3232 // LE-NEXT: entry:
3233 // LE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3234 // LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
3235 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3236 // LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3237 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3238 // LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3239 // LE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3240 // LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3241 // LE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
3242 // LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
3243 // LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3244 // LE-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3245 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3246 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3247 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3248 // LE-NEXT: ret void
3250 // BE-LABEL: @increment_b_st16(
3251 // BE-NEXT: entry:
3252 // BE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3253 // BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3254 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3255 // BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3256 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3257 // BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3258 // BE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3259 // BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3260 // BE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
3261 // BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
3262 // BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3263 // BE-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3264 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3265 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3266 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3267 // BE-NEXT: ret void
3269 // LENUMLOADS-LABEL: @increment_b_st16(
3270 // LENUMLOADS-NEXT: entry:
3271 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3272 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
3273 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3274 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3275 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3276 // LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3277 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3278 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3279 // LENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
3280 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
3281 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3282 // LENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3283 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3284 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3285 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3286 // LENUMLOADS-NEXT: ret void
3288 // BENUMLOADS-LABEL: @increment_b_st16(
3289 // BENUMLOADS-NEXT: entry:
3290 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3291 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3292 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3293 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3294 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3295 // BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3296 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3297 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3298 // BENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
3299 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
3300 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3301 // BENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3302 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3303 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3304 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3305 // BENUMLOADS-NEXT: ret void
3307 // LEWIDTH-LABEL: @increment_b_st16(
3308 // LEWIDTH-NEXT: entry:
3309 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3310 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
3311 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3312 // LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3313 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3314 // LEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3315 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3316 // LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3317 // LEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
3318 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
3319 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3320 // LEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3321 // LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3322 // LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3323 // LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3324 // LEWIDTH-NEXT: ret void
3326 // BEWIDTH-LABEL: @increment_b_st16(
3327 // BEWIDTH-NEXT: entry:
3328 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3329 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3330 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3331 // BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3332 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3333 // BEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3334 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3335 // BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3336 // BEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
3337 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
3338 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3339 // BEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3340 // BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3341 // BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3342 // BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3343 // BEWIDTH-NEXT: ret void
3345 // LEWIDTHNUM-LABEL: @increment_b_st16(
3346 // LEWIDTHNUM-NEXT: entry:
3347 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3348 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
3349 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3350 // LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3351 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3352 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3353 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3354 // LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3355 // LEWIDTHNUM-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
3356 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
3357 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3358 // LEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3359 // LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3360 // LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3361 // LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3362 // LEWIDTHNUM-NEXT: ret void
3364 // BEWIDTHNUM-LABEL: @increment_b_st16(
3365 // BEWIDTHNUM-NEXT: entry:
3366 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
3367 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3368 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3369 // BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3370 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3371 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3372 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
3373 // BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3374 // BEWIDTHNUM-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
3375 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
3376 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3377 // BEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
3378 // BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3379 // BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3380 // BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3381 // BEWIDTHNUM-NEXT: ret void
3383 void increment_b_st16(struct st16 *s) {
3384 s->b++;
3385 }
3387 // LE-LABEL: @increment_c_st16(
3388 // LE-NEXT: entry:
3389 // LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3390 // LE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
3391 // LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3392 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
3393 // LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3394 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3395 // LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3396 // LE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
3397 // LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3398 // LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
3399 // LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
3400 // LE-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
3401 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3402 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3403 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3404 // LE-NEXT: ret void
3406 // BE-LABEL: @increment_c_st16(
3407 // BE-NEXT: entry:
3408 // BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3409 // BE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
3410 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
3411 // BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3412 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3413 // BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3414 // BE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
3415 // BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3416 // BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3417 // BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
3418 // BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
3419 // BE-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
3420 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3421 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3422 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3423 // BE-NEXT: ret void
3425 // LENUMLOADS-LABEL: @increment_c_st16(
3426 // LENUMLOADS-NEXT: entry:
3427 // LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3428 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
3429 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3430 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
3431 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3432 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3433 // LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3434 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
3435 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3436 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
3437 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
3438 // LENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
3439 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3440 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3441 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3442 // LENUMLOADS-NEXT: ret void
3444 // BENUMLOADS-LABEL: @increment_c_st16(
3445 // BENUMLOADS-NEXT: entry:
3446 // BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3447 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
3448 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
3449 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3450 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3451 // BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3452 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
3453 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3454 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3455 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
3456 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
3457 // BENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
3458 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3459 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3460 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3461 // BENUMLOADS-NEXT: ret void
3463 // LEWIDTH-LABEL: @increment_c_st16(
3464 // LEWIDTH-NEXT: entry:
3465 // LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3466 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
3467 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3468 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
3469 // LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3470 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3471 // LEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3472 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
3473 // LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3474 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
3475 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
3476 // LEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
3477 // LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3478 // LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3479 // LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3480 // LEWIDTH-NEXT: ret void
3482 // BEWIDTH-LABEL: @increment_c_st16(
3483 // BEWIDTH-NEXT: entry:
3484 // BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3485 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
3486 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
3487 // BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3488 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3489 // BEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3490 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
3491 // BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3492 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3493 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
3494 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
3495 // BEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
3496 // BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3497 // BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3498 // BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3499 // BEWIDTH-NEXT: ret void
3501 // LEWIDTHNUM-LABEL: @increment_c_st16(
3502 // LEWIDTHNUM-NEXT: entry:
3503 // LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3504 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
3505 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3506 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
3507 // LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3508 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3509 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3510 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
3511 // LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3512 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
3513 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
3514 // LEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
3515 // LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3516 // LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3517 // LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3518 // LEWIDTHNUM-NEXT: ret void
3520 // BEWIDTHNUM-LABEL: @increment_c_st16(
3521 // BEWIDTHNUM-NEXT: entry:
3522 // BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3523 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
3524 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
3525 // BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3526 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3527 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3528 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
3529 // BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3530 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3531 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
3532 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
3533 // BEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
3534 // BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3535 // BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3536 // BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3537 // BEWIDTHNUM-NEXT: ret void
3539 void increment_c_st16(struct st16 *s) {
3540 s->c++;
3541 }
3543 // LE-LABEL: @increment_d_st16(
3544 // LE-NEXT: entry:
3545 // LE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3546 // LE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
3547 // LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
3548 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3549 // LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3550 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3551 // LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3552 // LE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
3553 // LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3554 // LE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
3555 // LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
3556 // LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3557 // LE-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
3558 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3559 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3560 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3561 // LE-NEXT: ret void
3563 // BE-LABEL: @increment_d_st16(
3564 // BE-NEXT: entry:
3565 // BE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3566 // BE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
3567 // BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3568 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3569 // BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3570 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3571 // BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3572 // BE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
3573 // BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3574 // BE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
3575 // BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
3576 // BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3577 // BE-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
3578 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3579 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3580 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3581 // BE-NEXT: ret void
3583 // LENUMLOADS-LABEL: @increment_d_st16(
3584 // LENUMLOADS-NEXT: entry:
3585 // LENUMLOADS-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3586 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
3587 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
3588 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3589 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3590 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3591 // LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3592 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
3593 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3594 // LENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
3595 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
3596 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3597 // LENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
3598 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3599 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3600 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3601 // LENUMLOADS-NEXT: ret void
3603 // BENUMLOADS-LABEL: @increment_d_st16(
3604 // BENUMLOADS-NEXT: entry:
3605 // BENUMLOADS-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3606 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
3607 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3608 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3609 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3610 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3611 // BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3612 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
3613 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3614 // BENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
3615 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
3616 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3617 // BENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
3618 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3619 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3620 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3621 // BENUMLOADS-NEXT: ret void
3623 // LEWIDTH-LABEL: @increment_d_st16(
3624 // LEWIDTH-NEXT: entry:
3625 // LEWIDTH-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3626 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
3627 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
3628 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3629 // LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3630 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3631 // LEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3632 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
3633 // LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3634 // LEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
3635 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
3636 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3637 // LEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
3638 // LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3639 // LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3640 // LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3641 // LEWIDTH-NEXT: ret void
3643 // BEWIDTH-LABEL: @increment_d_st16(
3644 // BEWIDTH-NEXT: entry:
3645 // BEWIDTH-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3646 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
3647 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3648 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3649 // BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3650 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3651 // BEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3652 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
3653 // BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3654 // BEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
3655 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
3656 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3657 // BEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
3658 // BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3659 // BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3660 // BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3661 // BEWIDTH-NEXT: ret void
3663 // LEWIDTHNUM-LABEL: @increment_d_st16(
3664 // LEWIDTHNUM-NEXT: entry:
3665 // LEWIDTHNUM-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3666 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
3667 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
3668 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3669 // LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3670 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3671 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3672 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
3673 // LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3674 // LEWIDTHNUM-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
3675 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
3676 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3677 // LEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
3678 // LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3679 // LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3680 // LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3681 // LEWIDTHNUM-NEXT: ret void
3683 // BEWIDTHNUM-LABEL: @increment_d_st16(
3684 // BEWIDTHNUM-NEXT: entry:
3685 // BEWIDTHNUM-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3686 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
3687 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3688 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3689 // BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3690 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3691 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3692 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
3693 // BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3694 // BEWIDTHNUM-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
3695 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
3696 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3697 // BEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
3698 // BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3699 // BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3700 // BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3701 // BEWIDTHNUM-NEXT: ret void
3703 void increment_d_st16(struct st16 *s) {
3704 s->d++;
3705 }
3707 // LE-LABEL: @increment_v_a_st16(
3708 // LE-NEXT: entry:
3709 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
3710 // LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3711 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
3712 // LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3713 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3714 // LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3715 // LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
3716 // LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3717 // LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
3718 // LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
3719 // LE-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
3720 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3721 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3722 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3723 // LE-NEXT: ret void
3725 // BE-LABEL: @increment_v_a_st16(
3726 // BE-NEXT: entry:
3727 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
3728 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
3729 // BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3730 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3731 // BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3732 // BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
3733 // BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3734 // BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3735 // BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
3736 // BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
3737 // BE-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
3738 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3739 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3740 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3741 // BE-NEXT: ret void
3743 // LENUMLOADS-LABEL: @increment_v_a_st16(
3744 // LENUMLOADS-NEXT: entry:
3745 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
3746 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3747 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
3748 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3749 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3750 // LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3751 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
3752 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3753 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
3754 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
3755 // LENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
3756 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3757 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3758 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3759 // LENUMLOADS-NEXT: ret void
3761 // BENUMLOADS-LABEL: @increment_v_a_st16(
3762 // BENUMLOADS-NEXT: entry:
3763 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
3764 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
3765 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3766 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3767 // BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3768 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
3769 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3770 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3771 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
3772 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
3773 // BENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
3774 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3775 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3776 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3777 // BENUMLOADS-NEXT: ret void
3779 // LEWIDTH-LABEL: @increment_v_a_st16(
3780 // LEWIDTH-NEXT: entry:
3781 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
3782 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
3783 // LEWIDTH-NEXT: store volatile i32 [[INC]], ptr [[S]], align 4
3784 // LEWIDTH-NEXT: ret void
3786 // BEWIDTH-LABEL: @increment_v_a_st16(
3787 // BEWIDTH-NEXT: entry:
3788 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
3789 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
3790 // BEWIDTH-NEXT: store volatile i32 [[INC]], ptr [[S]], align 4
3791 // BEWIDTH-NEXT: ret void
3793 // LEWIDTHNUM-LABEL: @increment_v_a_st16(
3794 // LEWIDTHNUM-NEXT: entry:
3795 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
3796 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
3797 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 4
3798 // LEWIDTHNUM-NEXT: store volatile i32 [[INC]], ptr [[S]], align 4
3799 // LEWIDTHNUM-NEXT: ret void
3801 // BEWIDTHNUM-LABEL: @increment_v_a_st16(
3802 // BEWIDTHNUM-NEXT: entry:
3803 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
3804 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
3805 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 4
3806 // BEWIDTHNUM-NEXT: store volatile i32 [[INC]], ptr [[S]], align 4
3807 // BEWIDTHNUM-NEXT: ret void
3809 void increment_v_a_st16(volatile struct st16 *s) {
3810 s->a++;
3811 }
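// Note: in the LEWIDTH/BEWIDTH (and *WIDTHNUM) runs above, the 32-bit field 'a'
// is incremented with a direct volatile i32 load/store, whereas the other runs
// read-modify-write the whole 64-bit container.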
3813 // LE-LABEL: @increment_v_b_st16(
3814 // LE-NEXT: entry:
3815 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
3816 // LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
3817 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3818 // LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3819 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3820 // LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3821 // LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
3822 // LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3823 // LE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
3824 // LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
3825 // LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3826 // LE-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
3827 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3828 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3829 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3830 // LE-NEXT: ret void
3832 // BE-LABEL: @increment_v_b_st16(
3833 // BE-NEXT: entry:
3834 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
3835 // BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3836 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3837 // BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3838 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3839 // BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3840 // BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
3841 // BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3842 // BE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
3843 // BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
3844 // BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3845 // BE-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
3846 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3847 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3848 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3849 // BE-NEXT: ret void
3851 // LENUMLOADS-LABEL: @increment_v_b_st16(
3852 // LENUMLOADS-NEXT: entry:
3853 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
3854 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
3855 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3856 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3857 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3858 // LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3859 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
3860 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3861 // LENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
3862 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
3863 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3864 // LENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
3865 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3866 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3867 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3868 // LENUMLOADS-NEXT: ret void
3870 // BENUMLOADS-LABEL: @increment_v_b_st16(
3871 // BENUMLOADS-NEXT: entry:
3872 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
3873 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3874 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
3875 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3876 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3877 // BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3878 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
3879 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
3880 // BENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
3881 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
3882 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
3883 // BENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
3884 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
3885 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
3886 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3887 // BENUMLOADS-NEXT: ret void
3889 // LEWIDTH-LABEL: @increment_v_b_st16(
3890 // LEWIDTH-NEXT: entry:
3891 // LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
3892 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
3893 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
3894 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
3895 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
3896 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
3897 // LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
3898 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
3899 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
3900 // LEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
3901 // LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
3902 // LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
3903 // LEWIDTH-NEXT: ret void
3905 // BEWIDTH-LABEL: @increment_v_b_st16(
3906 // BEWIDTH-NEXT: entry:
3907 // BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
3908 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
3909 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
3910 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
3911 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
3912 // BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
3913 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
3914 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
3915 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
3916 // BEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
3917 // BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
3918 // BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
3919 // BEWIDTH-NEXT: ret void
3921 // LEWIDTHNUM-LABEL: @increment_v_b_st16(
3922 // LEWIDTHNUM-NEXT: entry:
3923 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
3924 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
3925 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
3926 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
3927 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
3928 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
3929 // LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
3930 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
3931 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
3932 // LEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
3933 // LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
3934 // LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
3935 // LEWIDTHNUM-NEXT: ret void
3937 // BEWIDTHNUM-LABEL: @increment_v_b_st16(
3938 // BEWIDTHNUM-NEXT: entry:
3939 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
3940 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
3941 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
3942 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
3943 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
3944 // BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
3945 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
3946 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
3947 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
3948 // BEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
3949 // BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
3950 // BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
3951 // BEWIDTHNUM-NEXT: ret void
3953 void increment_v_b_st16(volatile struct st16 *s) {
3954 s->b++;
3955 }
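// Note: the *WIDTH* runs above narrow the volatile access of 'b' to the i32 one
// word into the struct and touch only the 16 bits that hold 'b'; the other runs
// still go through the full i64 container.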
3957 // LE-LABEL: @increment_v_c_st16(
3958 // LE-NEXT: entry:
3959 // LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3960 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
3961 // LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
3962 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
3963 // LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3964 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3965 // LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3966 // LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
3967 // LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3968 // LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
3969 // LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
3970 // LE-NEXT: store volatile i64 [[BF_SET]], ptr [[C]], align 4
3971 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3972 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3973 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3974 // LE-NEXT: ret void
3976 // BE-LABEL: @increment_v_c_st16(
3977 // BE-NEXT: entry:
3978 // BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3979 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
3980 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
3981 // BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
3982 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
3983 // BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
3984 // BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
3985 // BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
3986 // BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3987 // BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
3988 // BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
3989 // BE-NEXT: store volatile i64 [[BF_SET]], ptr [[C]], align 4
3990 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
3991 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
3992 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
3993 // BE-NEXT: ret void
3995 // LENUMLOADS-LABEL: @increment_v_c_st16(
3996 // LENUMLOADS-NEXT: entry:
3997 // LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
3998 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
3999 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
4000 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
4001 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
4002 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4003 // LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
4004 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
4005 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
4006 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
4007 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
4008 // LENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[C]], align 4
4009 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
4010 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
4011 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
4012 // LENUMLOADS-NEXT: ret void
4014 // BENUMLOADS-LABEL: @increment_v_c_st16(
4015 // BENUMLOADS-NEXT: entry:
4016 // BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
4017 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
4018 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
4019 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
4020 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4021 // BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
4022 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
4023 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
4024 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
4025 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
4026 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
4027 // BENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[C]], align 4
4028 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
4029 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
4030 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
4031 // BENUMLOADS-NEXT: ret void
4033 // LEWIDTH-LABEL: @increment_v_c_st16(
4034 // LEWIDTH-NEXT: entry:
4035 // LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
4036 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
4037 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
4038 // LEWIDTH-NEXT: store volatile i32 [[INC]], ptr [[TMP1]], align 4
4039 // LEWIDTH-NEXT: ret void
4041 // BEWIDTH-LABEL: @increment_v_c_st16(
4042 // BEWIDTH-NEXT: entry:
4043 // BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
4044 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
4045 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
4046 // BEWIDTH-NEXT: store volatile i32 [[INC]], ptr [[TMP1]], align 4
4047 // BEWIDTH-NEXT: ret void
4049 // LEWIDTHNUM-LABEL: @increment_v_c_st16(
4050 // LEWIDTHNUM-NEXT: entry:
4051 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
4052 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
4053 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
4054 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
4055 // LEWIDTHNUM-NEXT: store volatile i32 [[INC]], ptr [[TMP1]], align 4
4056 // LEWIDTHNUM-NEXT: ret void
4058 // BEWIDTHNUM-LABEL: @increment_v_c_st16(
4059 // BEWIDTHNUM-NEXT: entry:
4060 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
4061 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
4062 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
4063 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
4064 // BEWIDTHNUM-NEXT: store volatile i32 [[INC]], ptr [[TMP1]], align 4
4065 // BEWIDTHNUM-NEXT: ret void
4067 void increment_v_c_st16(volatile struct st16 *s) {
4068 s->c++;
4069 }
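// Note: 'c' fills a complete 32-bit unit, so the *WIDTH* runs above update it
// with a plain volatile i32 load/store at the third word of the struct and need
// no masking.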
4071 // LE-LABEL: @increment_v_d_st16(
4072 // LE-NEXT: entry:
4073 // LE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
4074 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
4075 // LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
4076 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
4077 // LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
4078 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4079 // LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
4080 // LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
4081 // LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
4082 // LE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
4083 // LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
4084 // LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
4085 // LE-NEXT: store volatile i64 [[BF_SET]], ptr [[D]], align 4
4086 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
4087 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
4088 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
4089 // LE-NEXT: ret void
4091 // BE-LABEL: @increment_v_d_st16(
4092 // BE-NEXT: entry:
4093 // BE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
4094 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
4095 // BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
4096 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
4097 // BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
4098 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4099 // BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
4100 // BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
4101 // BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
4102 // BE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
4103 // BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
4104 // BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
4105 // BE-NEXT: store volatile i64 [[BF_SET]], ptr [[D]], align 4
4106 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
4107 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
4108 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
4109 // BE-NEXT: ret void
4111 // LENUMLOADS-LABEL: @increment_v_d_st16(
4112 // LENUMLOADS-NEXT: entry:
4113 // LENUMLOADS-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
4114 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
4115 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
4116 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
4117 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
4118 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4119 // LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
4120 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
4121 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
4122 // LENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
4123 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
4124 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
4125 // LENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[D]], align 4
4126 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
4127 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
4128 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
4129 // LENUMLOADS-NEXT: ret void
4131 // BENUMLOADS-LABEL: @increment_v_d_st16(
4132 // BENUMLOADS-NEXT: entry:
4133 // BENUMLOADS-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
4134 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
4135 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
4136 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
4137 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
4138 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4139 // BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
4140 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
4141 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
4142 // BENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
4143 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
4144 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
4145 // BENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[D]], align 4
4146 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
4147 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
4148 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
4149 // BENUMLOADS-NEXT: ret void
4151 // LEWIDTH-LABEL: @increment_v_d_st16(
4152 // LEWIDTH-NEXT: entry:
4153 // LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
4154 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
4155 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
4156 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
4157 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
4158 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
4159 // LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
4160 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
4161 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
4162 // LEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
4163 // LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
4164 // LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
4165 // LEWIDTH-NEXT: ret void
4167 // BEWIDTH-LABEL: @increment_v_d_st16(
4168 // BEWIDTH-NEXT: entry:
4169 // BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
4170 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
4171 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
4172 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
4173 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
4174 // BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
4175 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
4176 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
4177 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
4178 // BEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
4179 // BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
4180 // BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
4181 // BEWIDTH-NEXT: ret void
4183 // LEWIDTHNUM-LABEL: @increment_v_d_st16(
4184 // LEWIDTHNUM-NEXT: entry:
4185 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
4186 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
4187 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
4188 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
4189 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
4190 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
4191 // LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
4192 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
4193 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
4194 // LEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
4195 // LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
4196 // LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
4197 // LEWIDTHNUM-NEXT: ret void
4199 // BEWIDTHNUM-LABEL: @increment_v_d_st16(
4200 // BEWIDTHNUM-NEXT: entry:
4201 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
4202 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
4203 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
4204 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
4205 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
4206 // BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
4207 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
4208 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
4209 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
4210 // BEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
4211 // BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
4212 // BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
4213 // BEWIDTHNUM-NEXT: ret void
4215 void increment_v_d_st16(volatile struct st16 *s) {
4216 s->d++;
4217 }
4218 // st17 has alignment = 1; the AAPCS does not define how b should be
4219 // accessed, but accessing c should use a char-sized access.
4220 struct st17 {
4221 int b : 32;
4222 char c : 8;
4223 } __attribute__((packed));
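// The checks that follow show 'b' still being loaded and stored through the
// packed i40 container in each of the run configurations, since the AAPCS
// leaves its access width unspecified.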
4225 // LE-LABEL: @increment_v_b_st17(
4226 // LE-NEXT: entry:
4227 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
4228 // LE-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
4229 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
4230 // LE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
4231 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4232 // LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
4233 // LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
4234 // LE-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
4235 // LE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
4236 // LE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
4237 // LE-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
4238 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
4239 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
4240 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
4241 // LE-NEXT: ret void
4243 // BE-LABEL: @increment_v_b_st17(
4244 // BE-NEXT: entry:
4245 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
4246 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
4247 // BE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
4248 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4249 // BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
4250 // BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
4251 // BE-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
4252 // BE-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
4253 // BE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
4254 // BE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
4255 // BE-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
4256 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
4257 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
4258 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
4259 // BE-NEXT: ret void
4261 // LENUMLOADS-LABEL: @increment_v_b_st17(
4262 // LENUMLOADS-NEXT: entry:
4263 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
4264 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
4265 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
4266 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
4267 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4268 // LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
4269 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
4270 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
4271 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
4272 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
4273 // LENUMLOADS-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
4274 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
4275 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
4276 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
4277 // LENUMLOADS-NEXT: ret void
4279 // BENUMLOADS-LABEL: @increment_v_b_st17(
4280 // BENUMLOADS-NEXT: entry:
4281 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
4282 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
4283 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
4284 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4285 // BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
4286 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
4287 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
4288 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
4289 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
4290 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
4291 // BENUMLOADS-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
4292 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
4293 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
4294 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
4295 // BENUMLOADS-NEXT: ret void
4297 // LEWIDTH-LABEL: @increment_v_b_st17(
4298 // LEWIDTH-NEXT: entry:
4299 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
4300 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
4301 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
4302 // LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
4303 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4304 // LEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
4305 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
4306 // LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
4307 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
4308 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
4309 // LEWIDTH-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
4310 // LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
4311 // LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
4312 // LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
4313 // LEWIDTH-NEXT: ret void
4315 // BEWIDTH-LABEL: @increment_v_b_st17(
4316 // BEWIDTH-NEXT: entry:
4317 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
4318 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
4319 // BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
4320 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4321 // BEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
4322 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
4323 // BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
4324 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
4325 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
4326 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
4327 // BEWIDTH-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
4328 // BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
4329 // BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
4330 // BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
4331 // BEWIDTH-NEXT: ret void
4333 // LEWIDTHNUM-LABEL: @increment_v_b_st17(
4334 // LEWIDTHNUM-NEXT: entry:
4335 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
4336 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
4337 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
4338 // LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
4339 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4340 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
4341 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
4342 // LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
4343 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
4344 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
4345 // LEWIDTHNUM-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
4346 // LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
4347 // LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
4348 // LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
4349 // LEWIDTHNUM-NEXT: ret void
4351 // BEWIDTHNUM-LABEL: @increment_v_b_st17(
4352 // BEWIDTHNUM-NEXT: entry:
4353 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
4354 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
4355 // BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
4356 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4357 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
4358 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
4359 // BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
4360 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
4361 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
4362 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
4363 // BEWIDTHNUM-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
4364 // BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
4365 // BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
4366 // BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
4367 // BEWIDTHNUM-NEXT: ret void
4369 void increment_v_b_st17(volatile struct st17 *s) {
4370 s->b++;
4371 }
4373 // LE-LABEL: @increment_v_c_st17(
4374 // LE-NEXT: entry:
4375 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
4376 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 32
4377 // LE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
4378 // LE-NEXT: [[INC:%.*]] = add i8 [[BF_CAST]], 1
4379 // LE-NEXT: [[TMP1:%.*]] = zext i8 [[INC]] to i40
4380 // LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
4381 // LE-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
4382 // LE-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
4383 // LE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 4294967295
4384 // LE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
4385 // LE-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
4386 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
4387 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
4388 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
4389 // LE-NEXT: ret void
4391 // BE-LABEL: @increment_v_c_st17(
4392 // BE-NEXT: entry:
4393 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
4394 // BE-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 32
4395 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 32
4396 // BE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
4397 // BE-NEXT: [[INC:%.*]] = add i8 [[BF_CAST]], 1
4398 // BE-NEXT: [[TMP1:%.*]] = zext i8 [[INC]] to i40
4399 // BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
4400 // BE-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
4401 // BE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -256
4402 // BE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
4403 // BE-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
4404 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
4405 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
4406 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
4407 // BE-NEXT: ret void
4409 // LENUMLOADS-LABEL: @increment_v_c_st17(
4410 // LENUMLOADS-NEXT: entry:
4411 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
4412 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 32
4413 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
4414 // LENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[BF_CAST]], 1
4415 // LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i8 [[INC]] to i40
4416 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
4417 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
4418 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
4419 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 4294967295
4420 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
4421 // LENUMLOADS-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
4422 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
4423 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
4424 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
4425 // LENUMLOADS-NEXT: ret void
4427 // BENUMLOADS-LABEL: @increment_v_c_st17(
4428 // BENUMLOADS-NEXT: entry:
4429 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
4430 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 32
4431 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 32
4432 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
4433 // BENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[BF_CAST]], 1
4434 // BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i8 [[INC]] to i40
4435 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
4436 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
4437 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -256
4438 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
4439 // BENUMLOADS-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
4440 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
4441 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
4442 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
4443 // BENUMLOADS-NEXT: ret void
4445 // LEWIDTH-LABEL: @increment_v_c_st17(
4446 // LEWIDTH-NEXT: entry:
4447 // LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
4448 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
4449 // LEWIDTH-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4450 // LEWIDTH-NEXT: store volatile i8 [[INC]], ptr [[TMP1]], align 1
4451 // LEWIDTH-NEXT: ret void
4453 // BEWIDTH-LABEL: @increment_v_c_st17(
4454 // BEWIDTH-NEXT: entry:
4455 // BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
4456 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
4457 // BEWIDTH-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4458 // BEWIDTH-NEXT: store volatile i8 [[INC]], ptr [[TMP1]], align 1
4459 // BEWIDTH-NEXT: ret void
4461 // LEWIDTHNUM-LABEL: @increment_v_c_st17(
4462 // LEWIDTHNUM-NEXT: entry:
4463 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
4464 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
4465 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4466 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP1]], align 1
4467 // LEWIDTHNUM-NEXT: store volatile i8 [[INC]], ptr [[TMP1]], align 1
4468 // LEWIDTHNUM-NEXT: ret void
4470 // BEWIDTHNUM-LABEL: @increment_v_c_st17(
4471 // BEWIDTHNUM-NEXT: entry:
4472 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
4473 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
4474 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
4475 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP1]], align 1
4476 // BEWIDTHNUM-NEXT: store volatile i8 [[INC]], ptr [[TMP1]], align 1
4477 // BEWIDTHNUM-NEXT: ret void
4479 void increment_v_c_st17(volatile struct st17 *s) {
4480 s->c++;
4481 }
4483 // A zero-width bit-field should act as a barrier here, as the C11
4484 // specification requires a and b to occupy different memory positions.
4485 struct zero_bitfield {
4486 int a : 8;
4487 char : 0;
4488 int b : 8;
4489 };
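// Informally, the checks below expect: the zero-width bit-field keeps a and b
// in separate bytes (b is reached via a GEP to field index 1), so each
// increment is an independent volatile i8 load and store; the *NUMLOADS and
// *WIDTHNUM runs additionally expect a second volatile load of the byte
// immediately before the store.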
4491 // LE-LABEL: @increment_a_zero_bitfield(
4492 // LE-NEXT: entry:
4493 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
4494 // LE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4495 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4496 // LE-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
4497 // LE-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
4498 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
4499 // LE-NEXT: ret void
4501 // BE-LABEL: @increment_a_zero_bitfield(
4502 // BE-NEXT: entry:
4503 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
4504 // BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4505 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4506 // BE-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
4507 // BE-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
4508 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
4509 // BE-NEXT: ret void
4511 // LENUMLOADS-LABEL: @increment_a_zero_bitfield(
4512 // LENUMLOADS-NEXT: entry:
4513 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
4514 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4515 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4516 // LENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
4517 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
4518 // LENUMLOADS-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
4519 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
4520 // LENUMLOADS-NEXT: ret void
4522 // BENUMLOADS-LABEL: @increment_a_zero_bitfield(
4523 // BENUMLOADS-NEXT: entry:
4524 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
4525 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4526 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4527 // BENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
4528 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
4529 // BENUMLOADS-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
4530 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
4531 // BENUMLOADS-NEXT: ret void
4533 // LEWIDTH-LABEL: @increment_a_zero_bitfield(
4534 // LEWIDTH-NEXT: entry:
4535 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
4536 // LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4537 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4538 // LEWIDTH-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
4539 // LEWIDTH-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
4540 // LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
4541 // LEWIDTH-NEXT: ret void
4543 // BEWIDTH-LABEL: @increment_a_zero_bitfield(
4544 // BEWIDTH-NEXT: entry:
4545 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
4546 // BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4547 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4548 // BEWIDTH-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
4549 // BEWIDTH-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
4550 // BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
4551 // BEWIDTH-NEXT: ret void
4553 // LEWIDTHNUM-LABEL: @increment_a_zero_bitfield(
4554 // LEWIDTHNUM-NEXT: entry:
4555 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
4556 // LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4557 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4558 // LEWIDTHNUM-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
4559 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
4560 // LEWIDTHNUM-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
4561 // LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
4562 // LEWIDTHNUM-NEXT: ret void
4564 // BEWIDTHNUM-LABEL: @increment_a_zero_bitfield(
4565 // BEWIDTHNUM-NEXT: entry:
4566 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
4567 // BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4568 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4569 // BEWIDTHNUM-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
4570 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
4571 // BEWIDTHNUM-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
4572 // BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
4573 // BEWIDTHNUM-NEXT: ret void
4575 void increment_a_zero_bitfield(volatile struct zero_bitfield *s) {
4576 s->a++;
4577 }
4579 // LE-LABEL: @increment_b_zero_bitfield(
4580 // LE-NEXT: entry:
4581 // LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], ptr [[S:%.*]], i32 0, i32 1
4582 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
4583 // LE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4584 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4585 // LE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
4586 // LE-NEXT: store volatile i8 [[TMP0]], ptr [[B]], align 1
4587 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
4588 // LE-NEXT: ret void
4590 // BE-LABEL: @increment_b_zero_bitfield(
4591 // BE-NEXT: entry:
4592 // BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], ptr [[S:%.*]], i32 0, i32 1
4593 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
4594 // BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4595 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4596 // BE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
4597 // BE-NEXT: store volatile i8 [[TMP0]], ptr [[B]], align 1
4598 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
4599 // BE-NEXT: ret void
4601 // LENUMLOADS-LABEL: @increment_b_zero_bitfield(
4602 // LENUMLOADS-NEXT: entry:
4603 // LENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], ptr [[S:%.*]], i32 0, i32 1
4604 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
4605 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4606 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4607 // LENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
4608 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[B]], align 1
4609 // LENUMLOADS-NEXT: store volatile i8 [[TMP0]], ptr [[B]], align 1
4610 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
4611 // LENUMLOADS-NEXT: ret void
4613 // BENUMLOADS-LABEL: @increment_b_zero_bitfield(
4614 // BENUMLOADS-NEXT: entry:
4615 // BENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], ptr [[S:%.*]], i32 0, i32 1
4616 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
4617 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4618 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4619 // BENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
4620 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[B]], align 1
4621 // BENUMLOADS-NEXT: store volatile i8 [[TMP0]], ptr [[B]], align 1
4622 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
4623 // BENUMLOADS-NEXT: ret void
4625 // LEWIDTH-LABEL: @increment_b_zero_bitfield(
4626 // LEWIDTH-NEXT: entry:
4627 // LEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], ptr [[S:%.*]], i32 0, i32 1
4628 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
4629 // LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4630 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4631 // LEWIDTH-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
4632 // LEWIDTH-NEXT: store volatile i8 [[TMP0]], ptr [[B]], align 1
4633 // LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
4634 // LEWIDTH-NEXT: ret void
4636 // BEWIDTH-LABEL: @increment_b_zero_bitfield(
4637 // BEWIDTH-NEXT: entry:
4638 // BEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], ptr [[S:%.*]], i32 0, i32 1
4639 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
4640 // BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4641 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4642 // BEWIDTH-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
4643 // BEWIDTH-NEXT: store volatile i8 [[TMP0]], ptr [[B]], align 1
4644 // BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
4645 // BEWIDTH-NEXT: ret void
4647 // LEWIDTHNUM-LABEL: @increment_b_zero_bitfield(
4648 // LEWIDTHNUM-NEXT: entry:
4649 // LEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], ptr [[S:%.*]], i32 0, i32 1
4650 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
4651 // LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4652 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4653 // LEWIDTHNUM-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
4654 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[B]], align 1
4655 // LEWIDTHNUM-NEXT: store volatile i8 [[TMP0]], ptr [[B]], align 1
4656 // LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
4657 // LEWIDTHNUM-NEXT: ret void
4659 // BEWIDTHNUM-LABEL: @increment_b_zero_bitfield(
4660 // BEWIDTHNUM-NEXT: entry:
4661 // BEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD:%.*]], ptr [[S:%.*]], i32 0, i32 1
4662 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[B]], align 1
4663 // BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
4664 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
4665 // BEWIDTHNUM-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
4666 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[B]], align 1
4667 // BEWIDTHNUM-NEXT: store volatile i8 [[TMP0]], ptr [[B]], align 1
4668 // BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
4669 // BEWIDTHNUM-NEXT: ret void
4671 void increment_b_zero_bitfield(volatile struct zero_bitfield *s) {
4672 s->b++;
4673 }
4675 // The zero-width bit-field here does not affect the layout.
4676 struct zero_bitfield_ok {
4677 short a : 8;
4678 char a1 : 8;
4679 long : 0;
4680 int b : 24;
4681 };
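// Roughly, the checks below verify: a and a1 still share one 16-bit storage
// unit (the *WIDTH*/*WIDTHNUM runs reach a1 directly as a volatile i8 at byte
// offset 1), while b sits in its own 32-bit unit at field index 1 and is
// updated with a 24-bit mask-and-shift read-modify-write in every
// configuration.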
4683 // LE-LABEL: @increment_a_zero_bitfield_ok(
4684 // LE-NEXT: entry:
4685 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[S:%.*]], align 4
4686 // LE-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 8
4687 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
4688 // LE-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
4689 // LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[S]], align 4
4690 // LE-NEXT: [[BF_ASHR2:%.*]] = ashr i16 [[BF_LOAD1]], 8
4691 // LE-NEXT: [[BF_CAST:%.*]] = trunc i16 [[BF_ASHR2]] to i8
4692 // LE-NEXT: [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
4693 // LE-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
4694 // LE-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
4695 // LE-NEXT: [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
4696 // LE-NEXT: [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
4697 // LE-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
4698 // LE-NEXT: [[BF_SHL6:%.*]] = shl i16 [[BF_VALUE]], 8
4699 // LE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], 255
4700 // LE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL6]]
4701 // LE-NEXT: store volatile i16 [[BF_SET]], ptr [[S]], align 4
4702 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i16 [[BF_VALUE]], 8
4703 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i16 [[BF_RESULT_SHL]], 8
4704 // LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i16 [[BF_RESULT_ASHR]] to i8
4705 // LE-NEXT: ret void
4707 // BE-LABEL: @increment_a_zero_bitfield_ok(
4708 // BE-NEXT: entry:
4709 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[S:%.*]], align 4
4710 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 8
4711 // BE-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
4712 // BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[S]], align 4
4713 // BE-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD1]], 8
4714 // BE-NEXT: [[BF_ASHR2:%.*]] = ashr i16 [[BF_SHL]], 8
4715 // BE-NEXT: [[BF_CAST:%.*]] = trunc i16 [[BF_ASHR2]] to i8
4716 // BE-NEXT: [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
4717 // BE-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
4718 // BE-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
4719 // BE-NEXT: [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
4720 // BE-NEXT: [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
4721 // BE-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
4722 // BE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], -256
4723 // BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_VALUE]]
4724 // BE-NEXT: store volatile i16 [[BF_SET]], ptr [[S]], align 4
4725 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i16 [[BF_VALUE]], 8
4726 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i16 [[BF_RESULT_SHL]], 8
4727 // BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i16 [[BF_RESULT_ASHR]] to i8
4728 // BE-NEXT: ret void
4730 // LENUMLOADS-LABEL: @increment_a_zero_bitfield_ok(
4731 // LENUMLOADS-NEXT: entry:
4732 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[S:%.*]], align 4
4733 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 8
4734 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
4735 // LENUMLOADS-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
4736 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[S]], align 4
4737 // LENUMLOADS-NEXT: [[BF_ASHR2:%.*]] = ashr i16 [[BF_LOAD1]], 8
4738 // LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i16 [[BF_ASHR2]] to i8
4739 // LENUMLOADS-NEXT: [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
4740 // LENUMLOADS-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
4741 // LENUMLOADS-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
4742 // LENUMLOADS-NEXT: [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
4743 // LENUMLOADS-NEXT: [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
4744 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
4745 // LENUMLOADS-NEXT: [[BF_SHL6:%.*]] = shl i16 [[BF_VALUE]], 8
4746 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], 255
4747 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL6]]
4748 // LENUMLOADS-NEXT: store volatile i16 [[BF_SET]], ptr [[S]], align 4
4749 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i16 [[BF_VALUE]], 8
4750 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i16 [[BF_RESULT_SHL]], 8
4751 // LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i16 [[BF_RESULT_ASHR]] to i8
4752 // LENUMLOADS-NEXT: ret void
4754 // BENUMLOADS-LABEL: @increment_a_zero_bitfield_ok(
4755 // BENUMLOADS-NEXT: entry:
4756 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[S:%.*]], align 4
4757 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 8
4758 // BENUMLOADS-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
4759 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[S]], align 4
4760 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD1]], 8
4761 // BENUMLOADS-NEXT: [[BF_ASHR2:%.*]] = ashr i16 [[BF_SHL]], 8
4762 // BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i16 [[BF_ASHR2]] to i8
4763 // BENUMLOADS-NEXT: [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
4764 // BENUMLOADS-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
4765 // BENUMLOADS-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
4766 // BENUMLOADS-NEXT: [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
4767 // BENUMLOADS-NEXT: [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
4768 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
4769 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], -256
4770 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_VALUE]]
4771 // BENUMLOADS-NEXT: store volatile i16 [[BF_SET]], ptr [[S]], align 4
4772 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i16 [[BF_VALUE]], 8
4773 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i16 [[BF_RESULT_SHL]], 8
4774 // BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i16 [[BF_RESULT_ASHR]] to i8
4775 // BENUMLOADS-NEXT: ret void
4777 // LEWIDTH-LABEL: @increment_a_zero_bitfield_ok(
4778 // LEWIDTH-NEXT: entry:
4779 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[S:%.*]], align 4
4780 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 8
4781 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
4782 // LEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
4783 // LEWIDTH-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
4784 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
4785 // LEWIDTH-NEXT: [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
4786 // LEWIDTH-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
4787 // LEWIDTH-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
4788 // LEWIDTH-NEXT: store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
4789 // LEWIDTH-NEXT: ret void
4791 // BEWIDTH-LABEL: @increment_a_zero_bitfield_ok(
4792 // BEWIDTH-NEXT: entry:
4793 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[S:%.*]], align 4
4794 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 8
4795 // BEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
4796 // BEWIDTH-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
4797 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
4798 // BEWIDTH-NEXT: [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
4799 // BEWIDTH-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
4800 // BEWIDTH-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
4801 // BEWIDTH-NEXT: store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
4802 // BEWIDTH-NEXT: ret void
4804 // LEWIDTHNUM-LABEL: @increment_a_zero_bitfield_ok(
4805 // LEWIDTHNUM-NEXT: entry:
4806 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[S:%.*]], align 4
4807 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 8
4808 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
4809 // LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
4810 // LEWIDTHNUM-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
4811 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
4812 // LEWIDTHNUM-NEXT: [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
4813 // LEWIDTHNUM-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
4814 // LEWIDTHNUM-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
4815 // LEWIDTHNUM-NEXT: [[BF_LOAD4:%.*]] = load volatile i8, ptr [[TMP2]], align 1
4816 // LEWIDTHNUM-NEXT: store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
4817 // LEWIDTHNUM-NEXT: ret void
4819 // BEWIDTHNUM-LABEL: @increment_a_zero_bitfield_ok(
4820 // BEWIDTHNUM-NEXT: entry:
4821 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[S:%.*]], align 4
4822 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 8
4823 // BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
4824 // BEWIDTHNUM-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
4825 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
4826 // BEWIDTHNUM-NEXT: [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
4827 // BEWIDTHNUM-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
4828 // BEWIDTHNUM-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
4829 // BEWIDTHNUM-NEXT: [[BF_LOAD4:%.*]] = load volatile i8, ptr [[TMP2]], align 1
4830 // BEWIDTHNUM-NEXT: store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
4831 // BEWIDTHNUM-NEXT: ret void
4833 void increment_a_zero_bitfield_ok(volatile struct zero_bitfield_ok *s) {
4834 s->a1 += s->a;
4835 }
4837 // LE-LABEL: @increment_b_zero_bitfield_ok(
4838 // LE-NEXT: entry:
4839 // LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], ptr [[S:%.*]], i32 0, i32 1
4840 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 4
4841 // LE-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
4842 // LE-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 8
4843 // LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
4844 // LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 4
4845 // LE-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 16777215
4846 // LE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16777216
4847 // LE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
4848 // LE-NEXT: store volatile i32 [[BF_SET]], ptr [[B]], align 4
4849 // LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 8
4850 // LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 8
4851 // LE-NEXT: ret void
4853 // BE-LABEL: @increment_b_zero_bitfield_ok(
4854 // BE-NEXT: entry:
4855 // BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], ptr [[S:%.*]], i32 0, i32 1
4856 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 4
4857 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 8
4858 // BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
4859 // BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 4
4860 // BE-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 16777215
4861 // BE-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 8
4862 // BE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 255
4863 // BE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
4864 // BE-NEXT: store volatile i32 [[BF_SET]], ptr [[B]], align 4
4865 // BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 8
4866 // BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 8
4867 // BE-NEXT: ret void
4869 // LENUMLOADS-LABEL: @increment_b_zero_bitfield_ok(
4870 // LENUMLOADS-NEXT: entry:
4871 // LENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], ptr [[S:%.*]], i32 0, i32 1
4872 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 4
4873 // LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
4874 // LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 8
4875 // LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
4876 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 4
4877 // LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 16777215
4878 // LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16777216
4879 // LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
4880 // LENUMLOADS-NEXT: store volatile i32 [[BF_SET]], ptr [[B]], align 4
4881 // LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 8
4882 // LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 8
4883 // LENUMLOADS-NEXT: ret void
4885 // BENUMLOADS-LABEL: @increment_b_zero_bitfield_ok(
4886 // BENUMLOADS-NEXT: entry:
4887 // BENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], ptr [[S:%.*]], i32 0, i32 1
4888 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 4
4889 // BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 8
4890 // BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
4891 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 4
4892 // BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 16777215
4893 // BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 8
4894 // BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 255
4895 // BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
4896 // BENUMLOADS-NEXT: store volatile i32 [[BF_SET]], ptr [[B]], align 4
4897 // BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 8
4898 // BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 8
4899 // BENUMLOADS-NEXT: ret void
4901 // LEWIDTH-LABEL: @increment_b_zero_bitfield_ok(
4902 // LEWIDTH-NEXT: entry:
4903 // LEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], ptr [[S:%.*]], i32 0, i32 1
4904 // LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 4
4905 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
4906 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 8
4907 // LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
4908 // LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 4
4909 // LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 16777215
4910 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16777216
4911 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
4912 // LEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[B]], align 4
4913 // LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 8
4914 // LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 8
4915 // LEWIDTH-NEXT: ret void
4917 // BEWIDTH-LABEL: @increment_b_zero_bitfield_ok(
4918 // BEWIDTH-NEXT: entry:
4919 // BEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], ptr [[S:%.*]], i32 0, i32 1
4920 // BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 4
4921 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 8
4922 // BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
4923 // BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 4
4924 // BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 16777215
4925 // BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 8
4926 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 255
4927 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
4928 // BEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[B]], align 4
4929 // BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 8
4930 // BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 8
4931 // BEWIDTH-NEXT: ret void
4933 // LEWIDTHNUM-LABEL: @increment_b_zero_bitfield_ok(
4934 // LEWIDTHNUM-NEXT: entry:
4935 // LEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], ptr [[S:%.*]], i32 0, i32 1
4936 // LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 4
4937 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
4938 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 8
4939 // LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
4940 // LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 4
4941 // LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 16777215
4942 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16777216
4943 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
4944 // LEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[B]], align 4
4945 // LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 8
4946 // LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 8
4947 // LEWIDTHNUM-NEXT: ret void
4949 // BEWIDTHNUM-LABEL: @increment_b_zero_bitfield_ok(
4950 // BEWIDTHNUM-NEXT: entry:
4951 // BEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ZERO_BITFIELD_OK:%.*]], ptr [[S:%.*]], i32 0, i32 1
4952 // BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 4
4953 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 8
4954 // BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
4955 // BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 4
4956 // BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 16777215
4957 // BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 8
4958 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 255
4959 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
4960 // BEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[B]], align 4
4961 // BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 8
4962 // BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 8
4963 // BEWIDTHNUM-NEXT: ret void
4965 void increment_b_zero_bitfield_ok(volatile struct zero_bitfield_ok *s) {
4966 s->b++;
4967 }