; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=x86_64-unknown -mcpu=skx -o - %s | FileCheck %s --check-prefix=X64-O0
; RUN: llc -mtriple=x86_64-unknown -mcpu=skx -o - %s | FileCheck %s --check-prefix=X64
; RUN: llc -O0 -mtriple=i686-unknown -mcpu=skx -o - %s | FileCheck %s --check-prefix=X86-O0
; RUN: llc -mtriple=i686-unknown -mcpu=skx -o - %s | FileCheck %s --check-prefix=X86
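; The four RUN lines cover x86_64 and i686, each at -O0 and at the default
; optimization level, all with -mcpu=skx and a dedicated FileCheck prefix.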
@c = external dso_local constant i8, align 1
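; The first function materializes booleans from @c: a negate/sign-extend/negate/
; truncate chain tested against zero is stored to the i8 alloca %a, and a double
; logical-not of @c compared (signed <=) against @c itself is stored to the i32
; alloca %b.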
; X64-O0: # %bb.0: # %entry
; X64-O0-NEXT: movzbl c, %ecx
; X64-O0-NEXT: xorl %eax, %eax
; X64-O0-NEXT: subl %ecx, %eax
; X64-O0-NEXT: movslq %eax, %rcx
; X64-O0-NEXT: xorl %eax, %eax
; X64-O0-NEXT: # kill: def $rax killed $eax
; X64-O0-NEXT: subq %rcx, %rax
; X64-O0-NEXT: # kill: def $al killed $al killed $rax
; X64-O0-NEXT: cmpb $0, %al
; X64-O0-NEXT: setne %al
; X64-O0-NEXT: andb $1, %al
; X64-O0-NEXT: movb %al, -{{[0-9]+}}(%rsp)
; X64-O0-NEXT: cmpb $0, c
; X64-O0-NEXT: setne %al
; X64-O0-NEXT: xorb $-1, %al
; X64-O0-NEXT: xorb $-1, %al
; X64-O0-NEXT: andb $1, %al
; X64-O0-NEXT: movzbl %al, %eax
; X64-O0-NEXT: movzbl c, %ecx
; X64-O0-NEXT: cmpl %ecx, %eax
; X64-O0-NEXT: setle %al
; X64-O0-NEXT: andb $1, %al
; X64-O0-NEXT: movzbl %al, %eax
; X64-O0-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
;
; X64: # %bb.0: # %entry
; X64-NEXT: movzbl c(%rip), %eax
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: testl %eax, %eax
; X64-NEXT: setne -{{[0-9]+}}(%rsp)
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: cmpl %eax, %ecx
; X64-NEXT: movl %edx, -{{[0-9]+}}(%rsp)
;
; X86-O0: # %bb.0: # %entry
; X86-O0-NEXT: subl $8, %esp
; X86-O0-NEXT: .cfi_def_cfa_offset 12
; X86-O0-NEXT: movb c, %al
; X86-O0-NEXT: cmpb $0, %al
; X86-O0-NEXT: setne %al
; X86-O0-NEXT: andb $1, %al
; X86-O0-NEXT: movb %al, {{[0-9]+}}(%esp)
; X86-O0-NEXT: cmpb $0, c
; X86-O0-NEXT: setne %al
; X86-O0-NEXT: xorb $-1, %al
; X86-O0-NEXT: xorb $-1, %al
; X86-O0-NEXT: andb $1, %al
; X86-O0-NEXT: movzbl %al, %eax
; X86-O0-NEXT: movzbl c, %ecx
; X86-O0-NEXT: cmpl %ecx, %eax
; X86-O0-NEXT: setle %al
; X86-O0-NEXT: andb $1, %al
; X86-O0-NEXT: movzbl %al, %eax
; X86-O0-NEXT: movl %eax, (%esp)
; X86-O0-NEXT: addl $8, %esp
; X86-O0-NEXT: .cfi_def_cfa_offset 4
;
; X86: # %bb.0: # %entry
; X86-NEXT: subl $8, %esp
; X86-NEXT: .cfi_def_cfa_offset 12
; X86-NEXT: movzbl c, %eax
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: testl %eax, %eax
; X86-NEXT: setne {{[0-9]+}}(%esp)
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: cmpl %eax, %ecx
; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-NEXT: addl $8, %esp
; X86-NEXT: .cfi_def_cfa_offset 4
entry:
  %a = alloca i8, align 1
  %b = alloca i32, align 4
  %0 = load i8, i8* @c, align 1
  %conv = zext i8 %0 to i32
  %sub = sub nsw i32 0, %conv
  %conv1 = sext i32 %sub to i64
  %sub2 = sub nsw i64 0, %conv1
  %conv3 = trunc i64 %sub2 to i8
  %tobool = icmp ne i8 %conv3, 0
  %frombool = zext i1 %tobool to i8
  store i8 %frombool, i8* %a, align 1
  %1 = load i8, i8* @c, align 1
  %tobool4 = icmp ne i8 %1, 0
  %lnot = xor i1 %tobool4, true
  %lnot5 = xor i1 %lnot, true
  %conv6 = zext i1 %lnot5 to i32
  %2 = load i8, i8* @c, align 1
  %conv7 = zext i8 %2 to i32
  %cmp = icmp sle i32 %conv6, %conv7
  %conv8 = zext i1 %cmp to i32
  store i32 %conv8, i32* %b, align 4
@var_5 = external dso_local global i32, align 4
@var_57 = external dso_local global i64, align 8
@_ZN8struct_210member_2_0E = external dso_local global i64, align 8
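; The next function mixes 32- and 64-bit arithmetic on @var_5: a sign-extended
; add of 8381627093 tested against zero is stored to a local, a signed
; greater-than comparison of the logical-not of ~@var_5 with @var_5 + 7093 is
; stored to @var_57, and that logical-not alone is stored to
; @_ZN8struct_210member_2_0E.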
; X64-O0: # %bb.0: # %entry
; X64-O0-NEXT: movslq var_5, %rax
; X64-O0-NEXT: movabsq $8381627093, %rcx # imm = 0x1F3957AD5
; X64-O0-NEXT: addq %rcx, %rax
; X64-O0-NEXT: cmpq $0, %rax
; X64-O0-NEXT: setne %al
; X64-O0-NEXT: andb $1, %al
; X64-O0-NEXT: movb %al, -{{[0-9]+}}(%rsp)
; X64-O0-NEXT: movl var_5, %eax
; X64-O0-NEXT: xorl $-1, %eax
; X64-O0-NEXT: cmpl $0, %eax
; X64-O0-NEXT: setne %al
; X64-O0-NEXT: xorb $-1, %al
; X64-O0-NEXT: andb $1, %al
; X64-O0-NEXT: movzbl %al, %eax
; X64-O0-NEXT: # kill: def $rax killed $eax
; X64-O0-NEXT: movslq var_5, %rcx
; X64-O0-NEXT: addq $7093, %rcx # imm = 0x1BB5
; X64-O0-NEXT: cmpq %rcx, %rax
; X64-O0-NEXT: setg %al
; X64-O0-NEXT: andb $1, %al
; X64-O0-NEXT: movzbl %al, %eax
; X64-O0-NEXT: # kill: def $rax killed $eax
; X64-O0-NEXT: movq %rax, var_57
; X64-O0-NEXT: movl var_5, %eax
; X64-O0-NEXT: xorl $-1, %eax
; X64-O0-NEXT: cmpl $0, %eax
; X64-O0-NEXT: setne %al
; X64-O0-NEXT: xorb $-1, %al
; X64-O0-NEXT: andb $1, %al
; X64-O0-NEXT: movzbl %al, %eax
; X64-O0-NEXT: # kill: def $rax killed $eax
; X64-O0-NEXT: movq %rax, _ZN8struct_210member_2_0E
;
; X64: # %bb.0: # %entry
; X64-NEXT: movslq var_5(%rip), %rax
; X64-NEXT: movabsq $-8381627093, %rcx # imm = 0xFFFFFFFE0C6A852B
; X64-NEXT: cmpq %rcx, %rax
; X64-NEXT: setne -{{[0-9]+}}(%rsp)
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: cmpq $-1, %rax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: cmpl $-1, %eax
; X64-NEXT: addq $7093, %rax # imm = 0x1BB5
; X64-NEXT: xorl %esi, %esi
; X64-NEXT: cmpq %rax, %rdx
; X64-NEXT: setg %sil
; X64-NEXT: movq %rsi, var_57(%rip)
; X64-NEXT: movq %rcx, _ZN8struct_210member_2_0E(%rip)
;
; X86-O0: # %bb.0: # %entry
; X86-O0-NEXT: subl $1, %esp
; X86-O0-NEXT: .cfi_def_cfa_offset 5
; X86-O0-NEXT: movl var_5, %eax
; X86-O0-NEXT: movl %eax, %ecx
; X86-O0-NEXT: sarl $31, %ecx
; X86-O0-NEXT: xorl $208307499, %eax # imm = 0xC6A852B
; X86-O0-NEXT: xorl $-2, %ecx
; X86-O0-NEXT: orl %ecx, %eax
; X86-O0-NEXT: setne (%esp)
; X86-O0-NEXT: movl var_5, %ecx
; X86-O0-NEXT: movl %ecx, %eax
; X86-O0-NEXT: sarl $31, %eax
; X86-O0-NEXT: movl %ecx, %edx
; X86-O0-NEXT: subl $-1, %edx
; X86-O0-NEXT: sete %dl
; X86-O0-NEXT: movzbl %dl, %edx
; X86-O0-NEXT: addl $7093, %ecx # imm = 0x1BB5
; X86-O0-NEXT: adcl $0, %eax
; X86-O0-NEXT: subl %edx, %ecx
; X86-O0-NEXT: sbbl $0, %eax
; X86-O0-NEXT: setl %al
; X86-O0-NEXT: movzbl %al, %eax
; X86-O0-NEXT: movl %eax, var_57
; X86-O0-NEXT: movl $0, var_57+4
; X86-O0-NEXT: movl var_5, %eax
; X86-O0-NEXT: subl $-1, %eax
; X86-O0-NEXT: sete %al
; X86-O0-NEXT: movzbl %al, %eax
; X86-O0-NEXT: movl %eax, _ZN8struct_210member_2_0E
; X86-O0-NEXT: movl $0, _ZN8struct_210member_2_0E+4
; X86-O0-NEXT: addl $1, %esp
; X86-O0-NEXT: .cfi_def_cfa_offset 4
;
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: subl $1, %esp
; X86-NEXT: .cfi_def_cfa_offset 9
; X86-NEXT: .cfi_offset %esi, -8
; X86-NEXT: movl var_5, %edx
; X86-NEXT: movl %edx, %eax
; X86-NEXT: xorl $208307499, %eax # imm = 0xC6A852B
; X86-NEXT: movl %edx, %esi
; X86-NEXT: sarl $31, %esi
; X86-NEXT: movl %esi, %ecx
; X86-NEXT: xorl $-2, %ecx
; X86-NEXT: orl %eax, %ecx
; X86-NEXT: setne (%esp)
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: andl %esi, %ecx
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: cmpl $-1, %ecx
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: cmpl $-1, %edx
; X86-NEXT: addl $7093, %edx # imm = 0x1BB5
; X86-NEXT: adcl $0, %esi
; X86-NEXT: cmpl %ecx, %edx
; X86-NEXT: sbbl $0, %esi
; X86-NEXT: movzbl %cl, %ecx
; X86-NEXT: movl %ecx, var_57
; X86-NEXT: movl $0, var_57+4
; X86-NEXT: movl %eax, _ZN8struct_210member_2_0E
; X86-NEXT: movl $0, _ZN8struct_210member_2_0E+4
; X86-NEXT: addl $1, %esp
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: popl %esi
; X86-NEXT: .cfi_def_cfa_offset 4
entry:
  %a = alloca i8, align 1
  %0 = load i32, i32* @var_5, align 4
  %conv = sext i32 %0 to i64
  %add = add nsw i64 %conv, 8381627093
  %tobool = icmp ne i64 %add, 0
  %frombool = zext i1 %tobool to i8
  store i8 %frombool, i8* %a, align 1
  %1 = load i32, i32* @var_5, align 4
  %neg = xor i32 %1, -1
  %tobool1 = icmp ne i32 %neg, 0
  %lnot = xor i1 %tobool1, true
  %conv2 = zext i1 %lnot to i64
  %2 = load i32, i32* @var_5, align 4
  %conv3 = sext i32 %2 to i64
  %add4 = add nsw i64 %conv3, 7093
  %cmp = icmp sgt i64 %conv2, %add4
  %conv5 = zext i1 %cmp to i64
  store i64 %conv5, i64* @var_57, align 8
  %3 = load i32, i32* @var_5, align 4
  %neg6 = xor i32 %3, -1
  %tobool7 = icmp ne i32 %neg6, 0
  %lnot8 = xor i1 %tobool7, true
  %conv9 = zext i1 %lnot8 to i64
  store i64 %conv9, i64* @_ZN8struct_210member_2_0E, align 8
@var_7 = external dso_local global i8, align 1
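; The next function XORs @var_7 with its own logical-not and stores the i16
; truncation to a local, then compares the logical-not of @var_7 for equality
; with @var_7 and stores the result through an undef i16 pointer.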
; X64-O0: # %bb.0: # %entry
; X64-O0-NEXT: movzbl var_7, %eax
; X64-O0-NEXT: cmpb $0, var_7
; X64-O0-NEXT: setne %cl
; X64-O0-NEXT: xorb $-1, %cl
; X64-O0-NEXT: andb $1, %cl
; X64-O0-NEXT: movzbl %cl, %ecx
; X64-O0-NEXT: xorl %ecx, %eax
; X64-O0-NEXT: # kill: def $ax killed $ax killed $eax
; X64-O0-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; X64-O0-NEXT: movzbl var_7, %eax
; X64-O0-NEXT: # kill: def $ax killed $ax killed $eax
; X64-O0-NEXT: cmpw $0, %ax
; X64-O0-NEXT: setne %al
; X64-O0-NEXT: xorb $-1, %al
; X64-O0-NEXT: andb $1, %al
; X64-O0-NEXT: movzbl %al, %eax
; X64-O0-NEXT: movzbl var_7, %ecx
; X64-O0-NEXT: cmpl %ecx, %eax
; X64-O0-NEXT: sete %al
; X64-O0-NEXT: andb $1, %al
; X64-O0-NEXT: movzbl %al, %eax
; X64-O0-NEXT: movw %ax, %cx
; X64-O0-NEXT: # implicit-def: $rax
; X64-O0-NEXT: movw %cx, (%rax)
;
; X64: # %bb.0: # %entry
; X64-NEXT: movzbl var_7(%rip), %eax
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: testl %eax, %eax
; X64-NEXT: movl %eax, %edx
; X64-NEXT: xorl %ecx, %edx
; X64-NEXT: movw %dx, -{{[0-9]+}}(%rsp)
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: cmpl %eax, %ecx
; X64-NEXT: movw %dx, (%rax)
;
; X86-O0: # %bb.0: # %entry
; X86-O0-NEXT: subl $2, %esp
; X86-O0-NEXT: .cfi_def_cfa_offset 6
; X86-O0-NEXT: movzbl var_7, %eax
; X86-O0-NEXT: cmpb $0, var_7
; X86-O0-NEXT: setne %cl
; X86-O0-NEXT: xorb $-1, %cl
; X86-O0-NEXT: andb $1, %cl
; X86-O0-NEXT: movzbl %cl, %ecx
; X86-O0-NEXT: xorl %ecx, %eax
; X86-O0-NEXT: # kill: def $ax killed $ax killed $eax
; X86-O0-NEXT: movw %ax, (%esp)
; X86-O0-NEXT: movzbl var_7, %eax
; X86-O0-NEXT: # kill: def $ax killed $ax killed $eax
; X86-O0-NEXT: cmpw $0, %ax
; X86-O0-NEXT: setne %al
; X86-O0-NEXT: xorb $-1, %al
; X86-O0-NEXT: andb $1, %al
; X86-O0-NEXT: movzbl %al, %eax
; X86-O0-NEXT: movzbl var_7, %ecx
; X86-O0-NEXT: cmpl %ecx, %eax
; X86-O0-NEXT: sete %al
; X86-O0-NEXT: andb $1, %al
; X86-O0-NEXT: movzbl %al, %eax
; X86-O0-NEXT: movw %ax, %cx
; X86-O0-NEXT: # implicit-def: $eax
; X86-O0-NEXT: movw %cx, (%eax)
; X86-O0-NEXT: addl $2, %esp
; X86-O0-NEXT: .cfi_def_cfa_offset 4
;
; X86: # %bb.0: # %entry
; X86-NEXT: subl $2, %esp
; X86-NEXT: .cfi_def_cfa_offset 6
; X86-NEXT: movzbl var_7, %ecx
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: testl %ecx, %ecx
; X86-NEXT: movl %ecx, %edx
; X86-NEXT: xorl %eax, %edx
; X86-NEXT: movw %dx, (%esp)
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: cmpl %ecx, %eax
; X86-NEXT: movw %dx, (%eax)
; X86-NEXT: addl $2, %esp
; X86-NEXT: .cfi_def_cfa_offset 4
entry:
  %a = alloca i16, align 2
  %0 = load i8, i8* @var_7, align 1
  %conv = zext i8 %0 to i32
  %1 = load i8, i8* @var_7, align 1
  %tobool = icmp ne i8 %1, 0
  %lnot = xor i1 %tobool, true
  %conv1 = zext i1 %lnot to i32
  %xor = xor i32 %conv, %conv1
  %conv2 = trunc i32 %xor to i16
  store i16 %conv2, i16* %a, align 2
  %2 = load i8, i8* @var_7, align 1
  %conv3 = zext i8 %2 to i16
  %tobool4 = icmp ne i16 %conv3, 0
  %lnot5 = xor i1 %tobool4, true
  %conv6 = zext i1 %lnot5 to i32
  %3 = load i8, i8* @var_7, align 1
  %conv7 = zext i8 %3 to i32
  %cmp = icmp eq i32 %conv6, %conv7
  %conv8 = zext i1 %cmp to i32
  %conv9 = trunc i32 %conv8 to i16
  store i16 %conv9, i16* undef, align 2
@var_13 = external dso_local global i32, align 4
@var_16 = external dso_local global i32, align 4
@var_46 = external dso_local global i32, align 4
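; @f3 combines ~@var_13, the logical-not of @var_13, and @var_16 with and/or in
; i64: one result is stored to a local i64, and a second combination whose
; "and with 0" folds away is truncated to i32 and stored to @var_46.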
define void @f3() #0 {
; X64-O0: # %bb.0: # %entry
; X64-O0-NEXT: movl var_13, %eax
; X64-O0-NEXT: xorl $-1, %eax
; X64-O0-NEXT: movl %eax, %eax
; X64-O0-NEXT: # kill: def $rax killed $eax
; X64-O0-NEXT: cmpl $0, var_13
; X64-O0-NEXT: setne %cl
; X64-O0-NEXT: xorb $-1, %cl
; X64-O0-NEXT: andb $1, %cl
; X64-O0-NEXT: movzbl %cl, %ecx
; X64-O0-NEXT: # kill: def $rcx killed $ecx
; X64-O0-NEXT: movl var_13, %edx
; X64-O0-NEXT: xorl $-1, %edx
; X64-O0-NEXT: xorl var_16, %edx
; X64-O0-NEXT: movl %edx, %edx
; X64-O0-NEXT: # kill: def $rdx killed $edx
; X64-O0-NEXT: andq %rdx, %rcx
; X64-O0-NEXT: orq %rcx, %rax
; X64-O0-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-O0-NEXT: movl var_13, %eax
; X64-O0-NEXT: xorl $-1, %eax
; X64-O0-NEXT: movl %eax, %eax
; X64-O0-NEXT: # kill: def $rax killed $eax
; X64-O0-NEXT: cmpl $0, var_13
; X64-O0-NEXT: setne %cl
; X64-O0-NEXT: xorb $-1, %cl
; X64-O0-NEXT: andb $1, %cl
; X64-O0-NEXT: movzbl %cl, %ecx
; X64-O0-NEXT: # kill: def $rcx killed $ecx
; X64-O0-NEXT: andq $0, %rcx
; X64-O0-NEXT: orq %rcx, %rax
; X64-O0-NEXT: # kill: def $eax killed $eax killed $rax
; X64-O0-NEXT: movl %eax, var_46
;
; X64: # %bb.0: # %entry
; X64-NEXT: movl var_13(%rip), %eax
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: testl %eax, %eax
; X64-NEXT: notl %eax
; X64-NEXT: movl var_16(%rip), %edx
; X64-NEXT: xorl %eax, %edx
; X64-NEXT: andl %edx, %ecx
; X64-NEXT: orl %eax, %ecx
; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-NEXT: movl %eax, var_46(%rip)
;
; X86-O0: # %bb.0: # %entry
; X86-O0-NEXT: pushl %ebp
; X86-O0-NEXT: .cfi_def_cfa_offset 8
; X86-O0-NEXT: .cfi_offset %ebp, -8
; X86-O0-NEXT: movl %esp, %ebp
; X86-O0-NEXT: .cfi_def_cfa_register %ebp
; X86-O0-NEXT: pushl %esi
; X86-O0-NEXT: andl $-8, %esp
; X86-O0-NEXT: subl $16, %esp
; X86-O0-NEXT: .cfi_offset %esi, -12
; X86-O0-NEXT: movl var_13, %ecx
; X86-O0-NEXT: movl %ecx, %eax
; X86-O0-NEXT: notl %eax
; X86-O0-NEXT: testl %ecx, %ecx
; X86-O0-NEXT: sete %cl
; X86-O0-NEXT: movzbl %cl, %ecx
; X86-O0-NEXT: movl var_16, %esi
; X86-O0-NEXT: movl %eax, %edx
; X86-O0-NEXT: xorl %esi, %edx
; X86-O0-NEXT: andl %edx, %ecx
; X86-O0-NEXT: orl %ecx, %eax
; X86-O0-NEXT: movl %eax, (%esp)
; X86-O0-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-O0-NEXT: movl var_13, %eax
; X86-O0-NEXT: notl %eax
; X86-O0-NEXT: movl %eax, var_46
; X86-O0-NEXT: leal -4(%ebp), %esp
; X86-O0-NEXT: popl %esi
; X86-O0-NEXT: popl %ebp
; X86-O0-NEXT: .cfi_def_cfa %esp, 4
;
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %ebp, -8
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: .cfi_def_cfa_register %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl var_13, %ecx
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: testl %ecx, %ecx
; X86-NEXT: notl %ecx
; X86-NEXT: movl var_16, %edx
; X86-NEXT: xorl %ecx, %edx
; X86-NEXT: andl %eax, %edx
; X86-NEXT: orl %ecx, %edx
; X86-NEXT: movl %edx, (%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl %ecx, var_46
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: .cfi_def_cfa %esp, 4
entry:
  %a = alloca i64, align 8
  %0 = load i32, i32* @var_13, align 4
  %neg = xor i32 %0, -1
  %conv = zext i32 %neg to i64
  %1 = load i32, i32* @var_13, align 4
  %tobool = icmp ne i32 %1, 0
  %lnot = xor i1 %tobool, true
  %conv1 = zext i1 %lnot to i64
  %2 = load i32, i32* @var_13, align 4
  %neg2 = xor i32 %2, -1
  %3 = load i32, i32* @var_16, align 4
  %xor = xor i32 %neg2, %3
  %conv3 = zext i32 %xor to i64
  %and = and i64 %conv1, %conv3
  %or = or i64 %conv, %and
  store i64 %or, i64* %a, align 8
  %4 = load i32, i32* @var_13, align 4
  %neg4 = xor i32 %4, -1
  %conv5 = zext i32 %neg4 to i64
  %5 = load i32, i32* @var_13, align 4
  %tobool6 = icmp ne i32 %5, 0
  %lnot7 = xor i1 %tobool6, true
  %conv8 = zext i1 %lnot7 to i64
  %and9 = and i64 %conv8, 0
  %or10 = or i64 %conv5, %and9
  %conv11 = trunc i64 %or10 to i32
  store i32 %conv11, i32* @var_46, align 4
  ret void
}