; RUN: llc %s -o - -enable-shrink-wrap=true -pass-remarks-output=%t | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE
; RUN: cat %t | FileCheck %s --check-prefix=REMARKS
; RUN: llc %s -o - -enable-shrink-wrap=false | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE
;
; Note: Lots of tests use inline asm instead of regular calls.
; This gives us better control over what register allocation will do.
; Otherwise, we may have a spill right in the entry block, defeating
; shrink-wrapping. Moreover, some of the inline asm statements (nop)
; are here to ensure that the related paths do not end up as critical
; edges.
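;
; (Background, informal and not matched by any check prefix: shrink-wrapping
; tries to move the prologue and epilogue away from the function boundaries
; and into the region that actually needs a frame. Roughly, instead of
;     pushq %rbx            ## prologue in the entry block
;     testl %edi, %edi      ## early-exit test
;     je LBB0_2
;     ...                   ## only this path uses %rbx
;     popq %rbx             ## epilogue in the exit block
; the ENABLE runs expect the push/pop pair to show up only on the path that
; needs it, after the early-exit test, while the DISABLE runs keep it at the
; function boundaries. The exact registers are whatever the allocator picks,
; which is why the checks capture them with patterns instead of hard-coding
; them.)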
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "x86_64-apple-macosx"


; Initial motivating example: Simple diamond with a call just on one side.
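;
; (For reference, the CFG of @foo below is the diamond
;        entry
;        /    \
;     true     |
;        \    /
;        false
; where only the 'true' side makes a call and therefore needs a frame.)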
; Compare the arguments and jump to exit.
; ENABLE: movl %edi, [[ARG0CPY:%e[a-z]+]]
; ENABLE-NEXT: cmpl %esi, %edi
; ENABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; (What we push does not matter. It should be some random scratch register.)
;
; Compare the arguments and jump to exit.
; This is after the prologue has been set up.
; DISABLE: movl %edi, [[ARG0CPY:%e[a-z]+]]
; DISABLE-NEXT: cmpl %esi, %edi
; DISABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Store %a in the alloca.
; CHECK: movl [[ARG0CPY]], 4(%rsp)
; Set the alloca address in the second argument.
; CHECK-NEXT: leaq 4(%rsp), %rsi
; Set the first argument to zero.
; CHECK-NEXT: xorl %edi, %edi
; CHECK-NEXT: callq _doSomething
;
; With shrink-wrapping, the epilogue is just after the call.
; ENABLE-NEXT: addq $8, %rsp
;
; CHECK: [[EXIT_LABEL]]:
;
; Without shrink-wrapping, the epilogue is in the exit block.
; Epilogue code. (What we pop does not matter.)
define i32 @foo(i32 %a, i32 %b) {
  %tmp = alloca i32, align 4
  %tmp2 = icmp slt i32 %a, %b
  br i1 %tmp2, label %true, label %false
  store i32 %a, i32* %tmp, align 4
  %tmp4 = call i32 @doSomething(i32 0, i32* %tmp)
  %tmp.0 = phi i32 [ %tmp4, %true ], [ %a, %0 ]

; Function Attrs: optsize
declare i32 @doSomething(i32, i32*)

; Check that we do not perform the restore inside the loop whereas the save
; is outside.
; CHECK-LABEL: freqSaveAndRestoreOutsideLoop:
;
; Shrink-wrapping allows us to skip the prologue in the else case.
; ENABLE: testl %edi, %edi
; ENABLE: je [[ELSE_LABEL:LBB[0-9_]+]]
;
; Make sure we save the CSR used in the inline asm: rbx.
;
; DISABLE: testl %edi, %edi
; DISABLE: je [[ELSE_LABEL:LBB[0-9_]+]]
;
; CHECK: xorl [[SUM:%eax]], [[SUM]]
; CHECK-NEXT: movl $10, [[IV:%e[a-z]+]]
;
; CHECK: [[LOOP:LBB[0-9_]+]]: ## %for.body
; CHECK: movl $1, [[TMP:%e[a-z]+]]
; CHECK: addl [[TMP]], [[SUM]]
; CHECK-NEXT: decl [[IV]]
; CHECK-NEXT: jne [[LOOP]]
;
; CHECK: shll $3, [[SUM]]
;
; DISABLE: [[ELSE_LABEL]]: ## %if.else
; Shift the second argument by one in the returned register.
; DISABLE: movl %esi, %eax
; DISABLE: addl %esi, %eax
;
; CHECK-DAG: popq %rbx
;
; ENABLE: [[ELSE_LABEL]]: ## %if.else
; Shift the second argument by one and store it into the returned register.
; ENABLE: movl %esi, %eax
; ENABLE: addl %esi, %eax
define i32 @freqSaveAndRestoreOutsideLoop(i32 %cond, i32 %N) {
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %for.preheader
  tail call void asm "nop", ""()

for.body: ; preds = %entry, %for.body
  %i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
  %sum.04 = phi i32 [ %add, %for.body ], [ 0, %for.preheader ]
  %call = tail call i32 asm sideeffect "movl $$1, $0", "=r,~{ebx}"()
  %add = add nsw i32 %call, %sum.04
  %inc = add nuw nsw i32 %i.05, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body
  %shl = shl i32 %add, 3

if.else: ; preds = %entry
  %mul = shl nsw i32 %N, 1

if.end: ; preds = %if.else, %for.end
  %sum.1 = phi i32 [ %shl, %for.end ], [ %mul, %if.else ]

declare i32 @something(...)

; Check that we do not perform the shrink-wrapping inside the loop even
; though that would be legal. The cost model must prevent that.
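; (Informal rationale: placing the save/restore around the rbx-clobbering
; inline asm inside the loop body would be correct, but the push/pop pair
; would then execute on each of the 10 iterations instead of once, so the
; save stays in the entry block and the restore in the exit block.)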
; CHECK-LABEL: freqSaveAndRestoreOutsideLoop2:
;
; Make sure we save the CSR used in the inline asm: rbx.
;
; CHECK: xorl [[SUM:%e[a-z]+]], [[SUM]]
; CHECK-NEXT: movl $10, [[IV:%e[a-z]+]]
;
; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ## %for.body
; CHECK: movl $1, [[TMP:%e[a-z]+]]
; CHECK: addl [[TMP]], [[SUM]]
; CHECK-NEXT: decl [[IV]]
; CHECK-NEXT: jne [[LOOP_LABEL]]
;
; CHECK: ## %for.exit
define i32 @freqSaveAndRestoreOutsideLoop2(i32 %cond) {
  br label %for.preheader
  tail call void asm "nop", ""()

for.body: ; preds = %for.body, %entry
  %i.04 = phi i32 [ 0, %for.preheader ], [ %inc, %for.body ]
  %sum.03 = phi i32 [ 0, %for.preheader ], [ %add, %for.body ]
  %call = tail call i32 asm sideeffect "movl $$1, $0", "=r,~{ebx}"()
  %add = add nsw i32 %call, %sum.03
  %inc = add nuw nsw i32 %i.04, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.exit, label %for.body
  tail call void asm "nop", ""()

for.end: ; preds = %for.body

; Check with a more complex case that we do not place the save within the
; loop; it stays outside the loop.
; CHECK-LABEL: loopInfoSaveOutsideLoop:
;
; ENABLE: testl %edi, %edi
; ENABLE-NEXT: je [[ELSE_LABEL:LBB[0-9_]+]]
;
; Make sure we save the CSR used in the inline asm: rbx.
;
; DISABLE: testl %edi, %edi
; DISABLE-NEXT: je [[ELSE_LABEL:LBB[0-9_]+]]
;
; CHECK: xorl [[SUM:%eax]], [[SUM]]
; CHECK-NEXT: movl $10, [[IV:%e[a-z]+]]
;
; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ## %for.body
; CHECK: movl $1, [[TMP:%e[a-z]+]]
; CHECK: addl [[TMP]], [[SUM]]
; CHECK-NEXT: decl [[IV]]
; CHECK-NEXT: jne [[LOOP_LABEL]]
;
; CHECK: shll $3, [[SUM]]
;
; DISABLE: [[ELSE_LABEL]]: ## %if.else
; Shift the second argument by one in the returned register.
; DISABLE: movl %esi, %eax
; DISABLE: addl %esi, %eax
;
; CHECK-DAG: popq %rbx
;
; ENABLE: [[ELSE_LABEL]]: ## %if.else
; Shift the second argument by one and store it into the returned register.
; ENABLE: movl %esi, %eax
; ENABLE: addl %esi, %eax
define i32 @loopInfoSaveOutsideLoop(i32 %cond, i32 %N) {
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %for.preheader
  tail call void asm "nop", ""()

for.body: ; preds = %entry, %for.body
  %i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
  %sum.04 = phi i32 [ %add, %for.body ], [ 0, %for.preheader ]
  %call = tail call i32 asm sideeffect "movl $$1, $0", "=r,~{ebx}"()
  %add = add nsw i32 %call, %sum.04
  %inc = add nuw nsw i32 %i.05, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body
  tail call void asm "nop", "~{ebx}"()
  %shl = shl i32 %add, 3

if.else: ; preds = %entry
  %mul = shl nsw i32 %N, 1

if.end: ; preds = %if.else, %for.end
  %sum.1 = phi i32 [ %shl, %for.end ], [ %mul, %if.else ]

; Check with a more complex case that we do not place the restore within the
; loop; it stays outside the loop.
; CHECK-LABEL: loopInfoRestoreOutsideLoop:
;
; ENABLE: testl %edi, %edi
; ENABLE-NEXT: je [[ELSE_LABEL:LBB[0-9_]+]]
;
; Make sure we save the CSR used in the inline asm: rbx.
;
; DISABLE: testl %edi, %edi
; DISABLE-NEXT: je [[ELSE_LABEL:LBB[0-9_]+]]
;
; CHECK: xorl [[SUM:%eax]], [[SUM]]
; CHECK-NEXT: movl $10, [[IV:%e[a-z]+]]
;
; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ## %for.body
; CHECK: movl $1, [[TMP:%e[a-z]+]]
; CHECK: addl [[TMP]], [[SUM]]
; CHECK-NEXT: decl [[IV]]
; CHECK-NEXT: jne [[LOOP_LABEL]]
;
; CHECK: shll $3, [[SUM]]
;
; DISABLE: [[ELSE_LABEL]]: ## %if.else
; Shift the second argument by one in the returned register.
; DISABLE: movl %esi, %eax
; DISABLE: addl %esi, %eax
;
; CHECK-DAG: popq %rbx
;
; ENABLE: [[ELSE_LABEL]]: ## %if.else
; Shift the second argument by one and store it into the returned register.
; ENABLE: movl %esi, %eax
; ENABLE: addl %esi, %eax
define i32 @loopInfoRestoreOutsideLoop(i32 %cond, i32 %N) nounwind {
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %if.then

if.then: ; preds = %entry
  tail call void asm "nop", "~{ebx}"()

for.body: ; preds = %for.body, %if.then
  %i.05 = phi i32 [ 0, %if.then ], [ %inc, %for.body ]
  %sum.04 = phi i32 [ 0, %if.then ], [ %add, %for.body ]
  %call = tail call i32 asm sideeffect "movl $$1, $0", "=r,~{ebx}"()
  %add = add nsw i32 %call, %sum.04
  %inc = add nuw nsw i32 %i.05, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body
  %shl = shl i32 %add, 3

if.else: ; preds = %entry
  %mul = shl nsw i32 %N, 1

if.end: ; preds = %if.else, %for.end
  %sum.1 = phi i32 [ %shl, %for.end ], [ %mul, %if.else ]

; Check that we handle functions with no frame information correctly.
; CHECK-LABEL: emptyFrame:
; CHECK-NEXT: xorl %eax, %eax
define i32 @emptyFrame() {

; Check that we handle inline asm correctly.
; CHECK-LABEL: inlineAsm:
;
; ENABLE: testl %edi, %edi
; ENABLE-NEXT: je [[ELSE_LABEL:LBB[0-9_]+]]
;
; Make sure we save the CSR used in the inline asm: rbx.
;
; DISABLE: testl %edi, %edi
; DISABLE-NEXT: je [[ELSE_LABEL:LBB[0-9_]+]]
;
; CHECK: movl $10, [[IV:%e[a-z]+]]
;
; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ## %for.body
; Inline asm statement.
; CHECK: addl $1, %ebx
; CHECK-NEXT: jne [[LOOP_LABEL]]
;
; CHECK: xorl %eax, %eax
;
; DISABLE: [[ELSE_LABEL]]: ## %if.else
; Shift the second argument by one in the returned register.
; DISABLE: movl %esi, %eax
; DISABLE: addl %esi, %eax
;
; CHECK-DAG: popq %rbx
;
; ENABLE: [[ELSE_LABEL]]: ## %if.else
; Shift the second argument by one and store it into the returned register.
; ENABLE: movl %esi, %eax
; ENABLE: addl %esi, %eax
define i32 @inlineAsm(i32 %cond, i32 %N) {
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %for.preheader
  tail call void asm "nop", ""()

for.body: ; preds = %entry, %for.body
  %i.03 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
  tail call void asm "addl $$1, %ebx", "~{ebx}"()
  %inc = add nuw nsw i32 %i.03, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.exit, label %for.body
  tail call void asm "nop", ""()

if.else: ; preds = %entry
  %mul = shl nsw i32 %N, 1

if.end: ; preds = %for.body, %if.else
  %sum.0 = phi i32 [ %mul, %if.else ], [ 0, %for.exit ]

; Check that we handle calls to variadic functions correctly.
; CHECK-LABEL: callVariadicFunc:
;
; ENABLE: movl %esi, %eax
; ENABLE: testl %edi, %edi
; ENABLE-NEXT: je [[ELSE_LABEL:LBB[0-9_]+]]
;
; DISABLE: movl %esi, %eax
; DISABLE: testl %edi, %edi
; DISABLE-NEXT: je [[ELSE_LABEL:LBB[0-9_]+]]
;
; Setup of the varargs.
; CHECK: movl %eax, (%rsp)
; CHECK-NEXT: movl %eax, %edi
; CHECK-NEXT: movl %eax, %esi
; CHECK-NEXT: movl %eax, %edx
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: movl %eax, %r8d
; CHECK-NEXT: movl %eax, %r9d
; CHECK-NEXT: xorl %eax, %eax
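; (For readers of the sequence above: the first six integer arguments are
; passed in %edi, %esi, %edx, %ecx, %r8d and %r9d, the seventh goes on the
; stack at (%rsp), and for a variadic call the SysV x86-64 ABI additionally
; expects %al to hold the number of vector registers used for arguments --
; zero here, hence the final xorl.)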
; CHECK-NEXT: callq _someVariadicFunc
; CHECK-NEXT: shll $3, %eax
;
; ENABLE-NEXT: addq $8, %rsp
;
; CHECK: [[ELSE_LABEL]]: ## %if.else
; Shift the second argument by one and store it into the returned register.
; CHECK: addl %eax, %eax
define i32 @callVariadicFunc(i32 %cond, i32 %N) {
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %if.then

if.then: ; preds = %entry
  %call = tail call i32 (i32, ...) @someVariadicFunc(i32 %N, i32 %N, i32 %N, i32 %N, i32 %N, i32 %N, i32 %N)
  %shl = shl i32 %call, 3

if.else: ; preds = %entry
  %mul = shl nsw i32 %N, 1

if.end: ; preds = %if.else, %if.then
  %sum.0 = phi i32 [ %shl, %if.then ], [ %mul, %if.else ]

declare i32 @someVariadicFunc(i32, ...)

; Check that we use LEA so as not to clobber EFLAGS.
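; (Informal note: an ADD/SUB stack adjustment such as "addq $8, %rsp" rewrites
; EFLAGS, while "leaq 8(%rsp), %rsp" performs the same adjustment without
; touching EFLAGS. So when the epilogue has to run between a test and the
; conditional branch that consumes its flags, the adjustment must use LEA,
; which is what the ENABLE check in useLEA below expects.)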
%struct.temp_slot = type { %struct.temp_slot*, %struct.rtx_def*, %struct.rtx_def*, i32, i64, %union.tree_node*, %union.tree_node*, i8, i8, i32, i32, i64, i64 }
%union.tree_node = type { %struct.tree_decl }
%struct.tree_decl = type { %struct.tree_common, i8*, i32, i32, %union.tree_node*, i48, %union.anon, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %struct.rtx_def*, %struct.rtx_def*, %union.anon.1, %union.tree_node*, %union.tree_node*, %union.tree_node*, i64, %struct.lang_decl* }
%struct.tree_common = type { %union.tree_node*, %union.tree_node*, i32 }
%union.anon = type { i64 }
%union.anon.1 = type { %struct.function* }
%struct.function = type { %struct.eh_status*, %struct.stmt_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, i8*, %union.tree_node*, %struct.function*, i32, i32, i32, i32, %struct.rtx_def*, %struct.ix86_args, %struct.rtx_def*, %struct.rtx_def*, i8*, %struct.initial_value_struct*, i32, %union.tree_node*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %union.tree_node*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i64, %union.tree_node*, %union.tree_node*, %struct.rtx_def*, %struct.rtx_def*, i32, %struct.rtx_def**, %struct.temp_slot*, i32, i32, i32, %struct.var_refs_queue*, i32, i32, i8*, %union.tree_node*, %struct.rtx_def*, i32, i32, %struct.machine_function*, i32, i32, %struct.language_function*, %struct.rtx_def*, i24 }
%struct.eh_status = type opaque
%struct.stmt_status = type opaque
%struct.expr_status = type { i32, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
%struct.emit_status = type { i32, i32, %struct.rtx_def*, %struct.rtx_def*, %union.tree_node*, %struct.sequence_stack*, i32, i32, i8*, i32, i8*, %union.tree_node**, %struct.rtx_def** }
%struct.sequence_stack = type { %struct.rtx_def*, %struct.rtx_def*, %union.tree_node*, %struct.sequence_stack* }
%struct.varasm_status = type opaque
%struct.ix86_args = type { i32, i32, i32, i32, i32, i32, i32 }
%struct.initial_value_struct = type opaque
%struct.var_refs_queue = type { %struct.rtx_def*, i32, i32, %struct.var_refs_queue* }
%struct.machine_function = type opaque
%struct.language_function = type opaque
%struct.lang_decl = type opaque
%struct.rtx_def = type { i32, [1 x %union.rtunion_def] }
%union.rtunion_def = type { i64 }

declare hidden fastcc %struct.temp_slot* @find_temp_slot_from_address(%struct.rtx_def* readonly)

; CHECK-LABEL: useLEA:
;
; CHECK: testq %rdi, %rdi
; CHECK-NEXT: je [[CLEANUP:LBB[0-9_]+]]
;
; CHECK: cmpw $66, (%rdi)
; CHECK-NEXT: jne [[CLEANUP]]
;
; CHECK: movq 8(%rdi), %rdi
; CHECK-NEXT: movzwl (%rdi), %e[[BF_LOAD2:[a-z]+]]
; CHECK-NEXT: leal -54(%r[[BF_LOAD2]]), [[TMP:%e[a-z]+]]
; CHECK-NEXT: cmpl $14, [[TMP]]
; CHECK-NEXT: ja [[LOR_LHS_FALSE:LBB[0-9_]+]]
;
; CHECK: movl $24599, [[TMP2:%e[a-z]+]]
; CHECK-NEXT: btl [[TMP]], [[TMP2]]
; CHECK-NEXT: jae [[LOR_LHS_FALSE:LBB[0-9_]+]]
;
; CHECK: [[CLEANUP]]: ## %cleanup
;
; CHECK: [[LOR_LHS_FALSE]]: ## %lor.lhs.false
; CHECK: cmpl $134, %e[[BF_LOAD2]]
; CHECK-NEXT: je [[CLEANUP]]
;
; CHECK: cmpl $140, %e[[BF_LOAD2]]
; CHECK-NEXT: je [[CLEANUP]]
;
; CHECK: callq _find_temp_slot_from_address
; CHECK-NEXT: testq %rax, %rax
;
; The adjustment must use LEA here (or be moved above the test).
; ENABLE-NEXT: leaq 8(%rsp), %rsp
;
; CHECK-NEXT: je [[CLEANUP]]
;
; CHECK: movb $1, 57(%rax)
define void @useLEA(%struct.rtx_def* readonly %x) {
  %cmp = icmp eq %struct.rtx_def* %x, null
  br i1 %cmp, label %cleanup, label %if.end

if.end: ; preds = %entry
  %tmp = getelementptr inbounds %struct.rtx_def, %struct.rtx_def* %x, i64 0, i32 0
  %bf.load = load i32, i32* %tmp, align 8
  %bf.clear = and i32 %bf.load, 65535
  %cmp1 = icmp eq i32 %bf.clear, 66
  br i1 %cmp1, label %lor.lhs.false, label %cleanup

lor.lhs.false: ; preds = %if.end
  %arrayidx = getelementptr inbounds %struct.rtx_def, %struct.rtx_def* %x, i64 0, i32 1, i64 0
  %rtx = bitcast %union.rtunion_def* %arrayidx to %struct.rtx_def**
  %tmp1 = load %struct.rtx_def*, %struct.rtx_def** %rtx, align 8
  %tmp2 = getelementptr inbounds %struct.rtx_def, %struct.rtx_def* %tmp1, i64 0, i32 0
  %bf.load2 = load i32, i32* %tmp2, align 8
  %bf.clear3 = and i32 %bf.load2, 65535
  switch i32 %bf.clear3, label %if.end.55 [
    i32 67, label %cleanup
    i32 68, label %cleanup
    i32 54, label %cleanup
    i32 55, label %cleanup
    i32 58, label %cleanup
    i32 134, label %cleanup
    i32 56, label %cleanup
    i32 140, label %cleanup

if.end.55: ; preds = %lor.lhs.false
  %call = tail call fastcc %struct.temp_slot* @find_temp_slot_from_address(%struct.rtx_def* %tmp1) #2
  %cmp59 = icmp eq %struct.temp_slot* %call, null
  br i1 %cmp59, label %cleanup, label %if.then.60

if.then.60: ; preds = %if.end.55
  %addr_taken = getelementptr inbounds %struct.temp_slot, %struct.temp_slot* %call, i64 0, i32 8
  store i8 1, i8* %addr_taken, align 1

cleanup: ; preds = %if.then.60, %if.end.55, %lor.lhs.false, %lor.lhs.false, %lor.lhs.false, %lor.lhs.false, %lor.lhs.false, %lor.lhs.false, %lor.lhs.false, %lor.lhs.false, %if.end, %entry

; Make sure we do not insert unreachable code after a noreturn function.
; Although inserting such code is not incorrect, it is useless
; and it hurts the binary size.
;
; CHECK-LABEL: noreturn:
;
; CHECK: testb %dil, %dil
; CHECK-NEXT: jne [[ABORT:LBB[0-9_]+]]
;
; CHECK: movl $42, %eax
;
; CHECK: [[ABORT]]: ## %if.abort
;
; CHECK: callq _abort
define i32 @noreturn(i8 signext %bad_thing) {
  %tobool = icmp eq i8 %bad_thing, 0
  br i1 %tobool, label %if.end, label %if.abort
  tail call void @abort() #0

declare void @abort() #0

attributes #0 = { noreturn nounwind }

; Make sure that we handle infinite loops properly. When checking that the Save
; and Restore blocks are control flow equivalent, the loop searches for the
; immediate (post) dominator of the (restore) save blocks. When either the Save
; or Restore block is located in an infinite loop, the only immediate (post)
; dominator is itself. In this case, we cannot perform shrink-wrapping, but we
; should return gracefully and continue compilation.
; The only requirement for this test is that compilation finishes correctly.
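; (Informal illustration: in @infiniteloop below, for.body branches back to
; itself and never exits, so no block outside the loop post-dominates it and
; there is no control-flow-equivalent place outside the loop to put a restore;
; the pass must simply give up on this function and carry on.)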
; CHECK-LABEL: infiniteloop
define void @infiniteloop() {
  br i1 undef, label %if.then, label %if.end
  %ptr = alloca i32, i32 4

for.body: ; preds = %for.body, %entry
  %sum.03 = phi i32 [ 0, %if.then ], [ %add, %for.body ]
  %call = tail call i32 asm "movl $$1, $0", "=r,~{ebx}"()
  %add = add nsw i32 %call, %sum.03
  store i32 %add, i32* %ptr

; Another infinite loop test, this time with a body bigger than just one block.
; CHECK-LABEL: infiniteloop2
define void @infiniteloop2() {
  br i1 undef, label %if.then, label %if.end
  %ptr = alloca i32, i32 4

for.body: ; preds = %for.body, %entry
  %sum.03 = phi i32 [ 0, %if.then ], [ %add, %body1 ], [ 1, %body2]
  %call = tail call i32 asm "movl $$1, $0", "=r,~{ebx}"()
  %add = add nsw i32 %call, %sum.03
  store i32 %add, i32* %ptr
  br i1 undef, label %body1, label %body2
  tail call void asm sideeffect "nop", "~{ebx}"()
  tail call void asm sideeffect "nop", "~{ebx}"()

; Another infinite loop test, this time with two nested infinite loops.
; CHECK-LABEL: infiniteloop3
define void @infiniteloop3() {
  br i1 undef, label %loop2a, label %body

body: ; preds = %entry
  br i1 undef, label %loop2a, label %end

loop1: ; preds = %loop2a, %loop2b
  %var.phi = phi i32* [ %next.phi, %loop2b ], [ %var, %loop2a ]
  %next.phi = phi i32* [ %next.load, %loop2b ], [ %next.var, %loop2a ]
  %0 = icmp eq i32* %var, null
  %next.load = load i32*, i32** undef
  br i1 %0, label %loop2a, label %loop2b

loop2a: ; preds = %loop1, %body, %entry
  %var = phi i32* [ null, %body ], [ null, %entry ], [ %next.phi, %loop1 ]
  %next.var = phi i32* [ undef, %body ], [ null, %entry ], [ %next.load, %loop1 ]

loop2b: ; preds = %loop1
  %gep1 = bitcast i32* %var.phi to i32*
  %next.ptr = bitcast i32* %gep1 to i32**
  store i32* %next.phi, i32** %next.ptr

; Check that we do not just bail out on RegMask.
; In this case, the RegMask does not touch a CSR, so we are good to go!
; CHECK-LABEL: regmask:
;
; Compare the arguments and jump to exit.
; No prologue needed.
; ENABLE: cmpl %esi, %edi
; ENABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; (What we push does not matter. It should be some random scratch register.)
;
; Compare the arguments and jump to exit.
; This is after the prologue has been set up.
; DISABLE: cmpl %esi, %edi
; DISABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Set the first argument to zero.
; CHECK: xorl %edi, %edi
; Set the second argument to addr.
; CHECK-NEXT: movq %rdx, %rsi
; CHECK-NEXT: callq _doSomething
;
; CHECK: [[EXIT_LABEL]]:
; Set the first argument to 6.
; CHECK-NEXT: movl $6, %edi
; Set the second argument to addr.
; CHECK-NEXT: movq %rdx, %rsi
;
; Without shrink-wrapping, we need to restore the stack before
; making the tail call.
;
; CHECK-NEXT: jmp _doSomething
define i32 @regmask(i32 %a, i32 %b, i32* %addr) {
  %tmp2 = icmp slt i32 %a, %b
  br i1 %tmp2, label %true, label %false

  ; Clobber a CSR so that we check something on the regmask.
  tail call void asm sideeffect "nop", "~{ebx}"()
  %tmp4 = call i32 @doSomething(i32 0, i32* %addr)
  %tmp5 = tail call i32 @doSomething(i32 6, i32* %addr)
  %tmp.0 = phi i32 [ %tmp4, %true ], [ %tmp5, %false ]

@b = internal unnamed_addr global i1 false
@c = internal unnamed_addr global i8 0, align 1
@a = common global i32 0, align 4

; Make sure the prologue does not clobber the EFLAGS when
; it is live across the prologue.
;
; Note: The registers may change in the following patterns, but
; because they imply a register hierarchy (e.g., eax, al) it is
; tricky to write robust patterns.
;
; CHECK-LABEL: useLEAForPrologue:
;
; The prologue is at the beginning of the function when shrink-wrapping
; is disabled.
; The stack adjustment can use a SUB instruction because we do not need to
; preserve the EFLAGS at this point.
; DISABLE-NEXT: subq $16, %rsp
;
; Load the value of b.
; Create the zero value for the select assignment.
; CHECK: xorl [[CMOVE_VAL:%eax]], [[CMOVE_VAL]]
; CHECK-NEXT: cmpb $0, _b(%rip)
; CHECK-NEXT: jne [[STOREC_LABEL:LBB[0-9_]+]]
;
; CHECK: movb $48, [[CMOVE_VAL:%al]]
;
; CHECK: [[STOREC_LABEL]]:
;
; For the stack adjustment, we need to preserve the EFLAGS.
; ENABLE-NEXT: leaq -16(%rsp), %rsp
;
; Technically, we should use CMOVE_VAL here or its subregister.
; CHECK-NEXT: movb %al, _c(%rip)
; testb sets the EFLAGS read here.
; CHECK-NEXT: je [[VARFUNC_CALL:LBB[0-9_]+]]
;
; The code of the loop is not interesting.
;
; CHECK: [[VARFUNC_CALL]]:
; Set the null parameter.
; CHECK-NEXT: xorl %edi, %edi
; CHECK-NEXT: callq _varfunc
;
; Set the return value.
; CHECK-NEXT: xorl %eax, %eax
;
; CHECK-NEXT: addq $16, %rsp
define i32 @useLEAForPrologue(i32 %d, i32 %a, i8 %c) #3 {
  %.b = load i1, i1* @b, align 1
  %bool = select i1 %.b, i8 0, i8 48
  store i8 %bool, i8* @c, align 1
  br i1 %.b, label %for.body.lr.ph, label %for.end

for.body.lr.ph: ; preds = %entry
  tail call void asm sideeffect "nop", "~{ebx}"()

for.body: ; preds = %for.body.lr.ph, %for.body
  %inc6 = phi i8 [ %c, %for.body.lr.ph ], [ %inc, %for.body ]
  %cond5 = phi i32 [ %a, %for.body.lr.ph ], [ %conv3, %for.body ]
  %cmp2 = icmp slt i32 %d, %cond5
  %conv3 = zext i1 %cmp2 to i32
  %inc = add i8 %inc6, 1
  %cmp = icmp slt i8 %inc, 45
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge: ; preds = %for.body
  store i32 %conv3, i32* @a, align 4

for.end: ; preds = %for.cond.for.end_crit_edge, %entry
  %call = tail call i32 (i8*) @varfunc(i8* null)

declare i32 @varfunc(i8* nocapture readonly)

@sum1 = external hidden thread_local global i32, align 4

; Function Attrs: nounwind
; Make sure the TLS call used to access @sum1 happens after the prologue
; and before the epilogue.
; TLS calls used to be wrongly modeled, and shrink-wrapping would have inserted
; the prologue and epilogue just around the call to doSomething.
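; (Informal note: on this Darwin target the TLS access is itself a call --
; load the TLV descriptor for _sum1 and call through it, as the checks below
; show -- so the shrink-wrapped region has to cover that call too, not just
; the call to doSomething.)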
; CHECK-LABEL: tlsCall:
; CHECK: testb $1, %dil
; CHECK: je [[ELSE_LABEL:LBB[0-9_]+]]
;
; CHECK: movq _sum1@TLVP(%rip), %rdi
; CHECK-NEXT: callq *(%rdi)
; CHECK: jmp [[EXIT_LABEL:LBB[0-9_]+]]
;
; CHECK: callq _doSomething
define i32 @tlsCall(i1 %bool1, i32 %arg, i32* readonly dereferenceable(4) %sum1) #3 {
  br i1 %bool1, label %master, label %else
  %tmp1 = load i32, i32* %sum1, align 4
  store i32 %tmp1, i32* @sum1, align 4
  %call = call i32 @doSomething(i32 0, i32* null)
  %res = phi i32 [ %arg, %master], [ %call, %else ]

attributes #3 = { nounwind }

@irreducibleCFGa = common global i32 0, align 4
@irreducibleCFGf = common global i8 0, align 1
@irreducibleCFGb = common global i32 0, align 4

; Check that we do not run shrink-wrapping on irreducible CFGs until
; it is actually supported.
; At the moment, on those CFGs the loop information may be incorrect
; and, since we use that information to do the placement, we may end up
; inserting the prologue/epilogue at incorrect places.
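; (Informal reminder: a CFG is irreducible when a cycle can be entered at more
; than one block. In @irreducibleCFG below, the cycle involving for.cond8 and
; for.cond8.i.preheader is entered both at for.cond8.i.preheader, from %split,
; and at for.cond8, from %for.body4.i, so it is not a natural loop.)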
; CHECK-LABEL: irreducibleCFG:
;
; Make sure the prologue happens in the entry block.
;
; Make sure the epilogue happens in the exit block.
;
; Make sure we emit missed optimization remarks for this.
; REMARKS: Pass: shrink-wrap
; REMARKS-NEXT: Name: UnsupportedIrreducibleCFG
; REMARKS-NEXT: Function: irreducibleCFG
; REMARKS-NEXT: Args:
; REMARKS-NEXT: - String: Irreducible CFGs are not supported yet
define i32 @irreducibleCFG() #4 {
  %i0 = load i32, i32* @irreducibleCFGa, align 4
  %.pr = load i8, i8* @irreducibleCFGf, align 1
  %bool = icmp eq i8 %.pr, 0
  br i1 %bool, label %split, label %preheader
  %i1 = load i32, i32* @irreducibleCFGb, align 4
  %tobool1.i = icmp ne i32 %i1, 0
  br i1 %tobool1.i, label %for.body4.i, label %for.cond8.i.preheader
  %call.i = tail call i32 (...) @something(i32 %i0)
  %p1 = phi i32 [ %inc18.i, %for.inc ], [ 0, %for.body4.i ]
  %.pr1.pr = load i32, i32* @irreducibleCFGb, align 4
  br label %for.cond8.i.preheader

for.cond8.i.preheader:
  %.pr1 = phi i32 [ %.pr1.pr, %for.cond8 ], [ %i1, %split ]
  %p13 = phi i32 [ %p1, %for.cond8 ], [ 0, %split ]
  %inc18.i = add nuw nsw i32 %p13, 1
  %cmp = icmp slt i32 %inc18.i, 7
  br i1 %cmp, label %for.cond8, label %fn1.exit

attributes #4 = { "no-frame-pointer-elim"="true" }

@x = external global i32, align 4
@y = external global i32, align 4

; The post-dominator tree does not include the branch containing the infinite
; loop, which can result in a misplacement of the restore block if we're
; looking for the nearest common post-dominator of an "unreachable" block.
;
; CHECK-LABEL: infiniteLoopNoSuccessor:
;
; Make sure the prologue happens in the entry block.
; CHECK-NEXT: pushq %rbp
;
; Make sure we don't shrink-wrap.
; CHECK-NOT: pushq %rbp
;
; Make sure the epilogue happens in the exit block.
define void @infiniteLoopNoSuccessor() #5 {
  %1 = load i32, i32* @x, align 4
  %2 = icmp ne i32 %1, 0
  br i1 %2, label %3, label %4
  store i32 0, i32* @x, align 4
  call void (...) @somethingElse()
  %5 = load i32, i32* @y, align 4
  %6 = icmp ne i32 %5, 0
  br i1 %6, label %10, label %7
  %8 = call i32 (...) @something()
  call void (...) @somethingElse()

declare void @somethingElse(...)

attributes #5 = { nounwind "no-frame-pointer-elim-non-leaf" }