; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -asm-verbose=false -post-RA-scheduler=true | FileCheck %s

; BranchFolding should tail-merge the stores since they all precede
; direct branches to the same place.

; CHECK: tail_merge_me:
; CHECK: movl $0, GHJK(%rip)
; CHECK-NEXT: movl $1, HABC(%rip)
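; Illustrative sketch (comments only, not part of the test): without tail
; merging, each of the three predecessors of the merge block would carry its
; own copy of roughly
;   movl $0, GHJK(%rip)
;   jmp .LBB0_4          # merge-block label is hypothetical
; while the merge block holds the HABC store. After merging, one GHJK store
; sits directly in front of the HABC store, which is what the CHECK pair
; above requires.
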
define void @tail_merge_me() nounwind {
  br i1 %a, label %A, label %next
  br i1 %b, label %B, label %C
  store i32 0, i32* @GHJK
  store i32 0, i32* @GHJK
  store i32 0, i32* @GHJK
  store i32 1, i32* @HABC
  br i1 %c, label %return, label %altret
  call void @ear(i32 1000)
  call void @far(i32 1001)

declare i8* @choose(i8*, i8*)

; BranchFolding should tail-duplicate the indirect jump to avoid
; redundant branching.

; CHECK: tail_duplicate_me:
; CHECK: movl $0, GHJK(%rip)
; CHECK-NEXT: jmpq *%r
; CHECK: movl $0, GHJK(%rip)
; CHECK-NEXT: jmpq *%r
; CHECK: movl $0, GHJK(%rip)
; CHECK-NEXT: jmpq *%r
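; Illustrative sketch (comments only, not part of the test): the straight
; layout would route all three GHJK stores into one shared block ending in a
; single indirect jump through the blockaddress returned by @choose.
; Tail-duplicating that jump gives each predecessor its own
;   movl $0, GHJK(%rip)
;   jmpq *%rax           # register assignment is hypothetical
; pair and removes the extra unconditional branch into the shared block.
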
define void @tail_duplicate_me() nounwind {
  %c = call i8* @choose(i8* blockaddress(@tail_duplicate_me, %return),
                        i8* blockaddress(@tail_duplicate_me, %altret))
  br i1 %a, label %A, label %next
  br i1 %b, label %B, label %C
  store i32 0, i32* @GHJK
  store i32 0, i32* @GHJK
  store i32 0, i32* @GHJK
  indirectbr i8* %c, [label %return, label %altret]
  call void @ear(i32 1000)
  call void @far(i32 1001)

; BranchFolding shouldn't try to merge the tails of two blocks
; with only a branch in common, regardless of the fallthrough situation.

; CHECK: dont_merge_oddly:
; CHECK: ucomiss %xmm1, %xmm2
; CHECK-NEXT: jbe .LBB2_3
; CHECK-NEXT: ucomiss %xmm0, %xmm1
; CHECK-NEXT: ja .LBB2_4
; CHECK-NEXT: .LBB2_2:
; CHECK-NEXT: movb $1, %al
; CHECK-NEXT: .LBB2_3:
; CHECK-NEXT: ucomiss %xmm0, %xmm2
; CHECK-NEXT: jbe .LBB2_2
; CHECK-NEXT: .LBB2_4:
; CHECK-NEXT: xorb %al, %al
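; Illustrative sketch (comments only, not part of the test): the two compare
; blocks above share nothing but their final conditional branch. Merging a
; branch-only tail would just turn a fallthrough into something like
;   jmp .LBB2_tail       # hypothetical block holding only the shared branch
; adding an instruction without removing any, so BranchFolding should leave
; the layout alone.
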
define i1 @dont_merge_oddly(float* %result) nounwind {
  %tmp4 = getelementptr float* %result, i32 2
  %tmp5 = load float* %tmp4, align 4
  %tmp7 = getelementptr float* %result, i32 4
  %tmp8 = load float* %tmp7, align 4
  %tmp10 = getelementptr float* %result, i32 6
  %tmp11 = load float* %tmp10, align 4
  %tmp12 = fcmp olt float %tmp8, %tmp11
  br i1 %tmp12, label %bb, label %bb21
  %tmp23469 = fcmp olt float %tmp5, %tmp8
  br i1 %tmp23469, label %bb26, label %bb30
  %tmp23 = fcmp olt float %tmp5, %tmp11
  br i1 %tmp23, label %bb26, label %bb30

; Do any-size tail-merging when two candidate blocks will both require
; an unconditional jump to complete a two-way conditional branch.

; CHECK: c_expand_expr_stmt:
; CHECK: jmp .LBB3_11
; CHECK-NEXT: .LBB3_9:
; CHECK-NEXT: movq 8(%rax), %rax
; CHECK-NEXT: xorb %dl, %dl
; CHECK-NEXT: movb 16(%rax), %al
; CHECK-NEXT: cmpb $16, %al
; CHECK-NEXT: je .LBB3_11
; CHECK-NEXT: cmpb $23, %al
; CHECK-NEXT: jne .LBB3_14
; CHECK-NEXT: .LBB3_11:
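; Illustrative sketch (comments only, not part of the test): bb2.i.i2 and
; bb2.i3 lower to the same load/compare/branch sequence, and each would need
; a trailing unconditional jump anyway to finish its two-way branch, so the
; whole sequence can be merged into the single .LBB3_9 block checked above
; at no extra branch cost. Without merging, a second copy of roughly
;   movb 16(%rax), %al
;   cmpb $16, %al
;   je .LBB3_11
; would remain elsewhere in the function.
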
%0 = type { %struct.rtx_def* }
%struct.lang_decl = type opaque
%struct.rtx_def = type { i16, i8, i8, [1 x %union.rtunion] }
%struct.tree_decl = type { [24 x i8], i8*, i32, %union.tree_node*, i32, i8, i8, i8, i8, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %struct.rtx_def*, %union..2anon, %0, %union.tree_node*, %struct.lang_decl* }
%union..2anon = type { i32 }
%union.rtunion = type { i8* }
%union.tree_node = type { %struct.tree_decl }

define fastcc void @c_expand_expr_stmt(%union.tree_node* %expr) nounwind {
  %tmp4 = load i8* null, align 8 ; <i8> [#uses=3]
  switch i8 %tmp4, label %bb3 [
  switch i32 undef, label %bb1 [
  switch i32 undef, label %bb1 [
    i32 0, label %lvalue_p.exit

lvalue_p.exit: ; preds = %bb.i
  %tmp21 = load %union.tree_node** null, align 8 ; <%union.tree_node*> [#uses=3]
  %tmp22 = getelementptr inbounds %union.tree_node* %tmp21, i64 0, i32 0, i32 0, i64 0 ; <i8*> [#uses=1]
  %tmp23 = load i8* %tmp22, align 8 ; <i8> [#uses=1]
  %tmp24 = zext i8 %tmp23 to i32 ; <i32> [#uses=1]
  switch i32 %tmp24, label %lvalue_p.exit4 [

bb.i1: ; preds = %lvalue_p.exit
  %tmp25 = getelementptr inbounds %union.tree_node* %tmp21, i64 0, i32 0, i32 2 ; <i32*> [#uses=1]
  %tmp26 = bitcast i32* %tmp25 to %union.tree_node** ; <%union.tree_node**> [#uses=1]
  %tmp27 = load %union.tree_node** %tmp26, align 8 ; <%union.tree_node*> [#uses=2]
  %tmp28 = getelementptr inbounds %union.tree_node* %tmp27, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
  %tmp29 = load i8* %tmp28, align 8 ; <i8> [#uses=1]
  %tmp30 = zext i8 %tmp29 to i32 ; <i32> [#uses=1]
  switch i32 %tmp30, label %lvalue_p.exit4 [
    i32 0, label %bb2.i.i2

bb.i.i: ; preds = %bb.i1
  %tmp34 = tail call fastcc i32 @lvalue_p(%union.tree_node* null) nounwind ; <i32> [#uses=1]
  %phitmp = icmp ne i32 %tmp34, 0 ; <i1> [#uses=1]
  br label %lvalue_p.exit4

bb2.i.i2: ; preds = %bb.i1
  %tmp35 = getelementptr inbounds %union.tree_node* %tmp27, i64 0, i32 0, i32 0, i64 8 ; <i8*> [#uses=1]
  %tmp36 = bitcast i8* %tmp35 to %union.tree_node** ; <%union.tree_node**> [#uses=1]
  %tmp37 = load %union.tree_node** %tmp36, align 8 ; <%union.tree_node*> [#uses=1]
  %tmp38 = getelementptr inbounds %union.tree_node* %tmp37, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
  %tmp39 = load i8* %tmp38, align 8 ; <i8> [#uses=1]
  switch i8 %tmp39, label %bb2 [
    i8 16, label %lvalue_p.exit4
    i8 23, label %lvalue_p.exit4

bb2.i3: ; preds = %lvalue_p.exit
  %tmp40 = getelementptr inbounds %union.tree_node* %tmp21, i64 0, i32 0, i32 0, i64 8 ; <i8*> [#uses=1]
  %tmp41 = bitcast i8* %tmp40 to %union.tree_node** ; <%union.tree_node**> [#uses=1]
  %tmp42 = load %union.tree_node** %tmp41, align 8 ; <%union.tree_node*> [#uses=1]
  %tmp43 = getelementptr inbounds %union.tree_node* %tmp42, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
  %tmp44 = load i8* %tmp43, align 8 ; <i8> [#uses=1]
  switch i8 %tmp44, label %bb2 [
    i8 16, label %lvalue_p.exit4
    i8 23, label %lvalue_p.exit4

lvalue_p.exit4: ; preds = %bb2.i3, %bb2.i3, %bb2.i.i2, %bb2.i.i2, %bb.i.i, %bb.i1, %lvalue_p.exit
  %tmp45 = phi i1 [ %phitmp, %bb.i.i ], [ false, %bb2.i.i2 ], [ false, %bb2.i.i2 ], [ false, %bb.i1 ], [ false, %bb2.i3 ], [ false, %bb2.i3 ], [ false, %lvalue_p.exit ] ; <i1> [#uses=1]
  %tmp46 = icmp eq i8 %tmp4, 0 ; <i1> [#uses=1]
  %or.cond = or i1 %tmp45, %tmp46 ; <i1> [#uses=1]
  br i1 %or.cond, label %bb2, label %bb3

bb1: ; preds = %bb2.i.i, %bb.i, %bb
  %.old = icmp eq i8 %tmp4, 23 ; <i1> [#uses=1]
  br i1 %.old, label %bb2, label %bb3

bb2: ; preds = %bb1, %lvalue_p.exit4, %bb2.i3, %bb2.i.i2

bb3: ; preds = %bb2, %bb1, %lvalue_p.exit4, %bb2.i, %entry
  %expr_addr.0 = phi %union.tree_node* [ null, %bb2 ], [ %expr, %bb2.i ], [ %expr, %entry ], [ %expr, %bb1 ], [ %expr, %lvalue_p.exit4 ] ; <%union.tree_node*> [#uses=0]

declare fastcc i32 @lvalue_p(%union.tree_node* nocapture) nounwind readonly

declare fastcc %union.tree_node* @default_conversion(%union.tree_node*) nounwind

; If one tail merging candidate falls through into the other,
; tail merging is likely profitable regardless of how few
; instructions are involved. This function should have only
; one ret instruction.

; CHECK-NEXT: .LBB4_2:
; CHECK-NEXT: addq $8, %rsp
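; Illustrative sketch (comments only, not part of the test): both paths of
; @foo end in the same epilogue-plus-return, roughly
;   addq $8, %rsp
;   ret
; One path falls through into it and the other has to branch somewhere
; anyway, so merging is profitable even for this tiny tail and only one ret
; should survive.
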
define void @foo(i1* %V) nounwind {
  %t0 = icmp eq i1* %V, null
  br i1 %t0, label %return, label %bb

; one - One instruction may be tail-duplicated even with optsize.

; CHECK: movl $0, XYZ(%rip)
; CHECK: movl $0, XYZ(%rip)
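; Illustrative sketch (comments only, not part of the test): merging the
; single store would save one instruction but force one predecessor to reach
; it through an extra jump, so even under optsize the store is left
; duplicated:
;   movl $0, XYZ(%rip)   # copy in one predecessor
;   ...
;   movl $0, XYZ(%rip)   # copy in the other predecessor
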
@XYZ = external global i32

define void @one() nounwind optsize {
  %0 = icmp eq i32 undef, 0
  br i1 %0, label %bbx, label %bby
  switch i32 undef, label %bb7 [
    i32 16, label %return
  volatile store i32 0, i32* @XYZ
  switch i32 undef, label %bb12 [
    i32 128, label %return
  volatile store i32 0, i32* @XYZ

; two - Same as one, but with two instructions in the common
; tail instead of one. This is too much to be tail-duplicated, given
; the optsize attribute.

; CHECK: movl $0, XYZ(%rip)
; CHECK: movl $1, XYZ(%rip)
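; Illustrative sketch (comments only, not part of the test): here the common
; tail is two instructions,
;   movl $0, XYZ(%rip)
;   movl $1, XYZ(%rip)
; so under optsize keeping two copies costs more than sending one predecessor
; through a jump to a single shared copy, and only one copy should remain.
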
define void @two() nounwind optsize {
  %0 = icmp eq i32 undef, 0
  br i1 %0, label %bbx, label %bby
  switch i32 undef, label %bb7 [
    i32 16, label %return
  volatile store i32 0, i32* @XYZ
  volatile store i32 1, i32* @XYZ
  switch i32 undef, label %bb12 [
    i32 128, label %return
  volatile store i32 0, i32* @XYZ
  volatile store i32 1, i32* @XYZ

; two_nosize - Same as two, but without the optsize attribute.
; Now two instructions are enough to be tail-duplicated.

; CHECK: movl $0, XYZ(%rip)
; CHECK: movl $1, XYZ(%rip)
; CHECK: movl $0, XYZ(%rip)
; CHECK: movl $1, XYZ(%rip)
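; Illustrative sketch (comments only, not part of the test): without optsize
; the duplicated form, with
;   movl $0, XYZ(%rip)
;   movl $1, XYZ(%rip)
; repeated in each predecessor, is preferred because it removes a branch, so
; the store pair is expected to appear twice, as the four CHECK lines above
; require.
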
define void @two_nosize() nounwind {
  %0 = icmp eq i32 undef, 0
  br i1 %0, label %bbx, label %bby
  switch i32 undef, label %bb7 [
    i32 16, label %return
  volatile store i32 0, i32* @XYZ
  volatile store i32 1, i32* @XYZ
  switch i32 undef, label %bb12 [
    i32 128, label %return
  volatile store i32 0, i32* @XYZ
  volatile store i32 1, i32* @XYZ

; Tail-merging should merge the two ret instructions since one side
; can fall-through into the ret and the other side has to branch anyway.

; CHECK-NEXT: LBB8_2:
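; Illustrative sketch (comments only, not part of the test): the early-exit
; path and the loop-exit path both end in the same return sequence, roughly
;   LBB8_2:
;     ret
; One side can fall through into it and the other needs a branch regardless,
; so the two rets are expected to merge into that single block.
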
define i64 @TESTE(i64 %parami, i64 %paraml) nounwind readnone {
  %cmp = icmp slt i64 %parami, 1 ; <i1> [#uses=1]
  %varx.0 = select i1 %cmp, i64 1, i64 %parami ; <i64> [#uses=1]
  %cmp410 = icmp slt i64 %paraml, 1 ; <i1> [#uses=1]
  br i1 %cmp410, label %for.end, label %bb.nph

bb.nph: ; preds = %entry
  %tmp15 = mul i64 %paraml, %parami ; <i64> [#uses=1]

for.end: ; preds = %entry