1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -disable-cgp-select2branch -x86-cmov-converter=false | FileCheck %s
3 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
; test1: select a value loaded from %vp when bit %n of %x is clear, otherwise
; the constant 12.  The autogenerated CHECK lines expect a branchless
; lowering: btl tests the bit, cmovael conditionally loads over the movl $12.
; NOTE(review): this fragment appears to be missing lines — %1 is used by the
; icmp but its definition is not visible, and the CHECK-LABEL, entry label,
; ret and closing brace are absent.  Presumably %1 = (%x >> %n) & 1; confirm
; against the full file before regenerating checks.
5 define i32 @test1(i32 %x, i32 %n, i32 %w, i32* %vp) nounwind readnone {
7 ; CHECK: # %bb.0: # %entry
8 ; CHECK-NEXT: btl %esi, %edi
9 ; CHECK-NEXT: movl $12, %eax
10 ; CHECK-NEXT: cmovael (%rcx), %eax
15 %toBool = icmp eq i32 %1, 0
16 %v = load i32, i32* %vp
17 %.0 = select i1 %toBool, i32 %v, i32 12
; test2: same shape as test1 but with the select arms swapped (12 taken when
; %toBool is true), so the expected cmov condition flips from cmovae to cmovb.
; NOTE(review): as in test1, the definition of %1, the entry label, ret and
; closing brace are not visible in this fragment — confirm against the full
; file.
21 define i32 @test2(i32 %x, i32 %n, i32 %w, i32* %vp) nounwind readnone {
23 ; CHECK: # %bb.0: # %entry
24 ; CHECK-NEXT: btl %esi, %edi
25 ; CHECK-NEXT: movl $12, %eax
26 ; CHECK-NEXT: cmovbl (%rcx), %eax
31 %toBool = icmp eq i32 %1, 0
32 %v = load i32, i32* %vp
33 %.0 = select i1 %toBool, i32 12, i32 %v
38 ; x86's 32-bit cmov zeroes the high 32 bits of the destination. Make
39 ; sure CodeGen takes advantage of that to avoid an unnecessary
40 ; zero-extend (movl) after the cmov.
42 declare void @bar(i64) nounwind
; test3: truncates two i64 args to i32, selects between them on %p, then
; zero-extends the result for the call to @bar.  Per the comment above, the
; CHECK lines verify that no separate zero-extending movl is emitted after
; the 32-bit cmovel — writing a 32-bit register already zeroes the upper
; 32 bits.
; NOTE(review): the CHECK-LABEL / "# %bb.0" lines, the trailing retq, the IR
; ret and the closing brace are not visible in this fragment.
44 define void @test3(i64 %a, i64 %b, i1 %p) nounwind {
47 ; CHECK-NEXT: pushq %rax
48 ; CHECK-NEXT: testb $1, %dl
49 ; CHECK-NEXT: cmovel %esi, %edi
50 ; CHECK-NEXT: callq bar
51 ; CHECK-NEXT: popq %rax
53 %c = trunc i64 %a to i32
54 %d = trunc i64 %b to i32
55 %e = select i1 %p, i32 %c, i32 %d
56 %f = zext i32 %e to i64
57 call void @bar(i64 %f)
63 ; CodeGen shouldn't try to do a setne after an expanded 8-bit conditional
64 ; move without recomputing EFLAGS, because the expansion of the conditional
65 ; move with control flow may clobber EFLAGS (e.g., with xor, to set the register to zero).
68 ; The test is a little awkward; the important part is that there's a test before the setne.
73 @g_3 = external global i8
74 @g_96 = external global i8
75 @g_100 = external global i8
76 @_2E_str = external constant [15 x i8], align 1
; test4: regression test for the EFLAGS-clobber hazard described in the
; comment above — after an 8-bit conditional move is expanded into control
; flow, a setne must not reuse stale EFLAGS.  The key autogenerated sequence
; is the "testb %dl, %dl" recomputed immediately before "setne %bl".
; NOTE(review): this fragment is missing lines — the CHECK-LABEL, the IR
; basic-block label lines (entry, bb.i.i.i, func_4.exit.i, bb.i.i,
; func_1.exit), the final ret and the closing brace are not visible.
; Confirm against the full file.
78 define i1 @test4() nounwind {
80 ; CHECK: # %bb.0: # %entry
81 ; CHECK-NEXT: movsbl {{.*}}(%rip), %edx
82 ; CHECK-NEXT: movzbl %dl, %ecx
83 ; CHECK-NEXT: shrl $7, %ecx
84 ; CHECK-NEXT: xorb $1, %cl
85 ; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
86 ; CHECK-NEXT: sarl %cl, %edx
87 ; CHECK-NEXT: movb {{.*}}(%rip), %al
88 ; CHECK-NEXT: testb %al, %al
89 ; CHECK-NEXT: je .LBB3_2
90 ; CHECK-NEXT: # %bb.1: # %bb.i.i.i
91 ; CHECK-NEXT: movb {{.*}}(%rip), %cl
92 ; CHECK-NEXT: .LBB3_2: # %func_4.exit.i
93 ; CHECK-NEXT: pushq %rbx
94 ; CHECK-NEXT: testb %dl, %dl
95 ; CHECK-NEXT: setne %bl
96 ; CHECK-NEXT: movl %eax, %ecx
97 ; CHECK-NEXT: je .LBB3_4
98 ; CHECK-NEXT: # %bb.3: # %func_4.exit.i
99 ; CHECK-NEXT: xorl %ecx, %ecx
100 ; CHECK-NEXT: .LBB3_4: # %func_4.exit.i
101 ; CHECK-NEXT: testb %al, %al
102 ; CHECK-NEXT: je .LBB3_7
103 ; CHECK-NEXT: # %bb.5: # %func_4.exit.i
104 ; CHECK-NEXT: testb %bl, %bl
105 ; CHECK-NEXT: jne .LBB3_7
106 ; CHECK-NEXT: # %bb.6: # %bb.i.i
107 ; CHECK-NEXT: movb {{.*}}(%rip), %cl
108 ; CHECK-NEXT: xorl %ebx, %ebx
109 ; CHECK-NEXT: movl %eax, %ecx
110 ; CHECK-NEXT: .LBB3_7: # %func_1.exit
111 ; CHECK-NEXT: movb %cl, {{.*}}(%rip)
112 ; CHECK-NEXT: movzbl %cl, %esi
113 ; CHECK-NEXT: movl $_2E_str, %edi
114 ; CHECK-NEXT: xorl %eax, %eax
115 ; CHECK-NEXT: callq printf
116 ; CHECK-NEXT: movl %ebx, %eax
117 ; CHECK-NEXT: popq %rbx
; Entry block: computes ashr of sign-extended @g_3 by (1 ^ sign bit), then
; branches on whether @g_96 was zero.  (The "entry:" label line itself is
; not visible in this fragment.)
120 %0 = load i8, i8* @g_3, align 1
121 %1 = sext i8 %0 to i32
122 %.lobit.i = lshr i8 %0, 7
123 %tmp.i = zext i8 %.lobit.i to i32
124 %tmp.not.i = xor i32 %tmp.i, 1
125 %iftmp.17.0.i.i = ashr i32 %1, %tmp.not.i
126 %retval56.i.i = trunc i32 %iftmp.17.0.i.i to i8
127 %2 = icmp eq i8 %retval56.i.i, 0
128 %g_96.promoted.i = load i8, i8* @g_96
129 %3 = icmp eq i8 %g_96.promoted.i, 0
130 br i1 %3, label %func_4.exit.i, label %bb.i.i.i
; bb.i.i.i (label line missing): volatile load keeps @g_100 access alive.
133 %4 = load volatile i8, i8* @g_100, align 1
134 br label %func_4.exit.i
; func_4.exit.i (label line missing): merges, computes the select that the
; backend expands into the conditional-move control flow under test.
137 %.not.i = xor i1 %2, true
138 %brmerge.i = or i1 %3, %.not.i
139 %.mux.i = select i1 %2, i8 %g_96.promoted.i, i8 0
140 br i1 %brmerge.i, label %func_1.exit, label %bb.i.i
; bb.i.i (label line missing): another volatile @g_100 load.
143 %5 = load volatile i8, i8* @g_100, align 1
144 br label %func_1.exit
; func_1.exit (label line missing): stores the merged value back to @g_96
; and prints it; the function's ret of %ret is not visible here.
147 %g_96.tmp.0.i = phi i8 [ %g_96.promoted.i, %bb.i.i ], [ %.mux.i, %func_4.exit.i ]
148 %ret = phi i1 [ 0, %bb.i.i ], [ %.not.i, %func_4.exit.i ]
149 store i8 %g_96.tmp.0.i, i8* @g_96
150 %6 = zext i8 %g_96.tmp.0.i to i32
151 %7 = tail call i32 (i8*, ...) @printf(i8* noalias getelementptr ([15 x i8], [15 x i8]* @_2E_str, i64 0, i64 0), i32 %6) nounwind
155 declare i32 @printf(i8* nocapture, ...) nounwind
158 ; Should compile to setcc | -2.
; test5: select between -1 and -2 on (load > 41).  Per the comment above,
; the expected lowering is branchless: setg produces 0/1, and orl $-2 maps
; that to -2/-1 without any cmov.
; NOTE(review): the entry label, ret and closing brace are not visible in
; this fragment.
160 define i32 @test5(i32* nocapture %P) nounwind readonly {
161 ; CHECK-LABEL: test5:
162 ; CHECK: # %bb.0: # %entry
163 ; CHECK-NEXT: xorl %eax, %eax
164 ; CHECK-NEXT: cmpl $41, (%rdi)
165 ; CHECK-NEXT: setg %al
166 ; CHECK-NEXT: orl $-2, %eax
169 %0 = load i32, i32* %P, align 4
170 %1 = icmp sgt i32 %0, 41
171 %iftmp.0.0 = select i1 %1, i32 -1, i32 -2
; test6: select between 4 and 13 on (load > 41).  Expected lowering is also
; branchless: setl yields 1 when the load is < 42 (i.e. the select's false
; arm), and leal 4(%rax,%rax,8) computes 9*setl + 4, giving 13 or 4.
; NOTE(review): the entry label, ret and closing brace are not visible in
; this fragment.
175 define i32 @test6(i32* nocapture %P) nounwind readonly {
176 ; CHECK-LABEL: test6:
177 ; CHECK: # %bb.0: # %entry
178 ; CHECK-NEXT: xorl %eax, %eax
179 ; CHECK-NEXT: cmpl $42, (%rdi)
180 ; CHECK-NEXT: setl %al
181 ; CHECK-NEXT: leal 4(%rax,%rax,8), %eax
184 %0 = load i32, i32* %P, align 4
185 %1 = icmp sgt i32 %0, 41
186 %iftmp.0.0 = select i1 %1, i32 4, i32 13
191 ; Don't try to use a 16-bit conditional move to do an 8-bit select,
192 ; because it isn't worth it. Just use a branch instead.
; test7: i8 select between two inreg args.  Per the comment above, the CHECK
; lines verify that the backend emits a test + branch (jne .LBB6_1) rather
; than a 16-bit conditional move for the 8-bit select.
; NOTE(review): the "# %bb.0:" CHECK line, the retq lines after each arm,
; the IR ret and the closing brace are not visible in this fragment.
193 define i8 @test7(i1 inreg %c, i8 inreg %a, i8 inreg %b) nounwind {
194 ; CHECK-LABEL: test7:
196 ; CHECK-NEXT: testb $1, %dil
197 ; CHECK-NEXT: jne .LBB6_1
198 ; CHECK-NEXT: # %bb.2:
199 ; CHECK-NEXT: movl %edx, %eax
200 ; CHECK-NEXT: # kill: def $al killed $al killed $eax
202 ; CHECK-NEXT: .LBB6_1:
203 ; CHECK-NEXT: movl %esi, %eax
204 ; CHECK-NEXT: # kill: def $al killed $al killed $eax
206 %d = select i1 %c, i8 %a, i8 %b
210 define i32 @smin(i32 %x) {
213 ; CHECK-NEXT: notl %edi
214 ; CHECK-NEXT: testl %edi, %edi
215 ; CHECK-NEXT: movl $-1, %eax
216 ; CHECK-NEXT: cmovsl %edi, %eax
218 %not_x = xor i32 %x, -1
219 %1 = icmp slt i32 %not_x, -1
220 %sel = select i1 %1, i32 %not_x, i32 -1