# Prevent the machine scheduler from moving instructions past FAKE_USE.
# RUN: llc -run-pass machine-scheduler -mtriple=x86_64-unknown-linux -debug-only=machine-scheduler 2>&1 -o - %s | FileCheck %s
# We make sure that, beginning with the first FAKE_USE instruction,
# no changes to the sequence of instructions are undertaken by the
# scheduler. We don't bother to check that the order of the FAKE_USEs
# remains the same. They should, but it is irrelevant.
# CHECK: ********** MI Scheduling **********
# CHECK-NEXT: foo:%bb.0 entry
# CHECK-NEXT: From: %0:gr64 = COPY $rdi
# CHECK-NEXT: To: FAKE_USE %5:gr64
# CHECK-NEXT: RegionInstrs: 7
# CHECK: ********** MI Scheduling **********
# CHECK-NEXT: bar:%bb.0 entry
# CHECK-NEXT: From: %0:gr64 = COPY $rdi
# CHECK-NEXT: To: RET 0, killed $rax
# CHECK-NEXT: RegionInstrs: 7
23 ; ModuleID = 'test.ll'
24 source_filename = "test.ll"
25 target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
27 @glb = common dso_local local_unnamed_addr global [100 x i32] zeroinitializer, align 16
29 define dso_local i64 @foo(ptr %p) local_unnamed_addr optdebug {
31 %0 = load i32, ptr @glb, align 16
32 store i32 %0, ptr %p, align 4
33 %conv = sext i32 %0 to i64
34 %1 = load i32, ptr getelementptr inbounds ([100 x i32], ptr @glb, i64 0, i64 1), align 4
35 %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 1
36 store i32 %1, ptr %arrayidx1, align 4
37 %conv2 = sext i32 %1 to i64
38 %add3 = add nsw i64 %conv2, %conv
39 notail call void (...) @llvm.fake.use(i64 %add3)
40 notail call void (...) @llvm.fake.use(i32 %1)
41 notail call void (...) @llvm.fake.use(i32 %0)
42 notail call void (...) @llvm.fake.use(ptr %p)
46 define dso_local i64 @bar(ptr %p) local_unnamed_addr optdebug {
48 %0 = load i32, ptr @glb, align 16
49 store i32 %0, ptr %p, align 4
50 %conv = sext i32 %0 to i64
51 %1 = load i32, ptr getelementptr inbounds ([100 x i32], ptr @glb, i64 0, i64 1), align 4
52 %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 1
53 store i32 %1, ptr %arrayidx1, align 4
54 %conv2 = sext i32 %1 to i64
55 %add3 = add nsw i64 %conv2, %conv
59 ; Function Attrs: nocallback nofree nosync nounwind willreturn
60 declare void @llvm.stackprotector(ptr, ptr)
66 tracksRegLiveness: true
69 - { id: 0, class: gr64, preferred-register: '' }
70 - { id: 1, class: gr64_with_sub_8bit, preferred-register: '' }
71 - { id: 2, class: gr32, preferred-register: '' }
72 - { id: 3, class: gr64_with_sub_8bit, preferred-register: '' }
73 - { id: 4, class: gr32, preferred-register: '' }
74 - { id: 5, class: gr64, preferred-register: '' }
76 - { reg: '$rdi', virtual-reg: '%0' }
82 %1:gr64_with_sub_8bit = MOVSX64rm32 $rip, 1, $noreg, @glb, $noreg
83 MOV32mr %0, 1, $noreg, 0, $noreg, %1.sub_32bit
84 %3:gr64_with_sub_8bit = MOVSX64rm32 $rip, 1, $noreg, @glb + 4, $noreg
85 MOV32mr %0, 1, $noreg, 4, $noreg, %3.sub_32bit
87 %5:gr64 = nsw ADD64rr %5, %1, implicit-def dead $eflags
99 tracksRegLiveness: true
102 - { id: 0, class: gr64, preferred-register: '' }
103 - { id: 1, class: gr64_with_sub_8bit, preferred-register: '' }
104 - { id: 2, class: gr32, preferred-register: '' }
105 - { id: 3, class: gr64_with_sub_8bit, preferred-register: '' }
106 - { id: 4, class: gr32, preferred-register: '' }
107 - { id: 5, class: gr64_with_sub_8bit, preferred-register: '' }
109 - { reg: '$rdi', virtual-reg: '%0' }
115 %1:gr64_with_sub_8bit = MOVSX64rm32 $rip, 1, $noreg, @glb, $noreg
116 MOV32mr %0, 1, $noreg, 0, $noreg, %1.sub_32bit
117 %5:gr64_with_sub_8bit = MOVSX64rm32 $rip, 1, $noreg, @glb + 4, $noreg
118 MOV32mr %0, 1, $noreg, 4, $noreg, %5.sub_32bit
119 %5:gr64_with_sub_8bit = nsw ADD64rr %5, %1, implicit-def dead $eflags