; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -simplifycfg -simplifycfg-require-and-preserve-domtree=1 -simplifycfg-max-small-block-size=6 -S < %s | FileCheck %s
; RUN: opt -passes=simplifycfg -simplifycfg-max-small-block-size=6 -S < %s | FileCheck %s

target datalayout = "e-p:64:64-p5:32:32-A5"

declare void @llvm.assume(i1)
declare i1 @llvm.type.test(i8*, metadata) nounwind readnone
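
; The block is small enough for PRE: its contents are duplicated onto both
; incoming paths (false1 and the new true2.critedge block) and the block
; itself goes away.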
define void @test_01(i1 %c, i64* align 1 %ptr) local_unnamed_addr #0 {
; CHECK-LABEL: @test_01(
; CHECK-NEXT:    br i1 [[C:%.*]], label [[TRUE2_CRITEDGE:%.*]], label [[FALSE1:%.*]]
; CHECK:       false1:
; CHECK-NEXT:    store volatile i64 1, i64* [[PTR:%.*]], align 4
; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i64* [[PTR]] to i64
; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7
; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT:    store volatile i64 0, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 3, i64* [[PTR]], align 8
; CHECK-NEXT:    br label [[COMMON_RET:%.*]]
; CHECK:       common.ret:
; CHECK-NEXT:    ret void
; CHECK:       true2.critedge:
; CHECK-NEXT:    [[PTRINT_C:%.*]] = ptrtoint i64* [[PTR]] to i64
; CHECK-NEXT:    [[MASKEDPTR_C:%.*]] = and i64 [[PTRINT_C]], 7
; CHECK-NEXT:    [[MASKCOND_C:%.*]] = icmp eq i64 [[MASKEDPTR_C]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND_C]])
; CHECK-NEXT:    store volatile i64 0, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 2, i64* [[PTR]], align 8
; CHECK-NEXT:    br label [[COMMON_RET]]
;
  br i1 %c, label %true1, label %false1

true1:                                            ; preds = %false1, %0
  %ptrint = ptrtoint i64* %ptr to i64
  %maskedptr = and i64 %ptrint, 7
  %maskcond = icmp eq i64 %maskedptr, 0
  tail call void @llvm.assume(i1 %maskcond)
  store volatile i64 0, i64* %ptr, align 8
  br i1 %c, label %true2, label %false2

false1:                                           ; preds = %0
  store volatile i64 1, i64* %ptr, align 4
  br label %true1

true2:                                            ; preds = %true1
  store volatile i64 2, i64* %ptr, align 8
  ret void

false2:                                           ; preds = %true1
  store volatile i64 3, i64* %ptr, align 8
  ret void
}

; Corner case: the block has the max possible size for which we still do PRE.
define void @test_02(i1 %c, i64* align 1 %ptr) local_unnamed_addr #0 {
; CHECK-LABEL: @test_02(
; CHECK-NEXT:    br i1 [[C:%.*]], label [[TRUE2_CRITEDGE:%.*]], label [[FALSE1:%.*]]
; CHECK:       false1:
; CHECK-NEXT:    store volatile i64 1, i64* [[PTR:%.*]], align 4
; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i64* [[PTR]] to i64
; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7
; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT:    store volatile i64 0, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 3, i64* [[PTR]], align 8
; CHECK-NEXT:    br label [[COMMON_RET:%.*]]
; CHECK:       common.ret:
; CHECK-NEXT:    ret void
; CHECK:       true2.critedge:
; CHECK-NEXT:    [[PTRINT_C:%.*]] = ptrtoint i64* [[PTR]] to i64
; CHECK-NEXT:    [[MASKEDPTR_C:%.*]] = and i64 [[PTRINT_C]], 7
; CHECK-NEXT:    [[MASKCOND_C:%.*]] = icmp eq i64 [[MASKEDPTR_C]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND_C]])
; CHECK-NEXT:    store volatile i64 0, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 2, i64* [[PTR]], align 8
; CHECK-NEXT:    br label [[COMMON_RET]]
;
  br i1 %c, label %true1, label %false1

true1:                                            ; preds = %false1, %0
  %ptrint = ptrtoint i64* %ptr to i64
  %maskedptr = and i64 %ptrint, 7
  %maskcond = icmp eq i64 %maskedptr, 0
  tail call void @llvm.assume(i1 %maskcond)
  store volatile i64 0, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  br i1 %c, label %true2, label %false2

false1:                                           ; preds = %0
  store volatile i64 1, i64* %ptr, align 4
  br label %true1

true2:                                            ; preds = %true1
  store volatile i64 2, i64* %ptr, align 8
  ret void

false2:                                           ; preds = %true1
  store volatile i64 3, i64* %ptr, align 8
  ret void
}

; This block is too huge for PRE.
define void @test_03(i1 %c, i64* align 1 %ptr) local_unnamed_addr #0 {
; CHECK-LABEL: @test_03(
; CHECK-NEXT:    br i1 [[C:%.*]], label [[TRUE1:%.*]], label [[FALSE1:%.*]]
; CHECK:       true1:
; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i64* [[PTR:%.*]] to i64
; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7
; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT:    store volatile i64 0, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    br i1 [[C]], label [[TRUE2:%.*]], label [[FALSE2:%.*]]
; CHECK:       false1:
; CHECK-NEXT:    store volatile i64 1, i64* [[PTR]], align 4
; CHECK-NEXT:    br label [[TRUE1]]
; CHECK:       common.ret:
; CHECK-NEXT:    ret void
; CHECK:       true2:
; CHECK-NEXT:    store volatile i64 2, i64* [[PTR]], align 8
; CHECK-NEXT:    br label [[COMMON_RET:%.*]]
; CHECK:       false2:
; CHECK-NEXT:    store volatile i64 3, i64* [[PTR]], align 8
; CHECK-NEXT:    br label [[COMMON_RET]]
;
  br i1 %c, label %true1, label %false1

true1:                                            ; preds = %false1, %0
  %ptrint = ptrtoint i64* %ptr to i64
  %maskedptr = and i64 %ptrint, 7
  %maskcond = icmp eq i64 %maskedptr, 0
  tail call void @llvm.assume(i1 %maskcond)
  store volatile i64 0, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  br i1 %c, label %true2, label %false2

false1:                                           ; preds = %0
  store volatile i64 1, i64* %ptr, align 4
  br label %true1

true2:                                            ; preds = %true1
  store volatile i64 2, i64* %ptr, align 8
  ret void

false2:                                           ; preds = %true1
  store volatile i64 3, i64* %ptr, align 8
  ret void
}

; Try the max block size for PRE again, but with the bitcast/type test/assume
; sequence used for whole program devirt.
define void @test_04(i1 %c, i64* align 1 %ptr, [3 x i8*]* %vtable) local_unnamed_addr #0 {
; CHECK-LABEL: @test_04(
; CHECK-NEXT:    br i1 [[C:%.*]], label [[TRUE2_CRITEDGE:%.*]], label [[FALSE1:%.*]]
; CHECK:       false1:
; CHECK-NEXT:    store volatile i64 1, i64* [[PTR:%.*]], align 4
; CHECK-NEXT:    [[VTABLEI8:%.*]] = bitcast [3 x i8*]* [[VTABLE:%.*]] to i8*
; CHECK-NEXT:    [[P:%.*]] = call i1 @llvm.type.test(i8* [[VTABLEI8]], metadata !"foo")
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[P]])
; CHECK-NEXT:    store volatile i64 0, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 3, i64* [[PTR]], align 8
; CHECK-NEXT:    br label [[COMMON_RET:%.*]]
; CHECK:       common.ret:
; CHECK-NEXT:    ret void
; CHECK:       true2.critedge:
; CHECK-NEXT:    [[VTABLEI8_C:%.*]] = bitcast [3 x i8*]* [[VTABLE]] to i8*
; CHECK-NEXT:    [[P_C:%.*]] = call i1 @llvm.type.test(i8* [[VTABLEI8_C]], metadata !"foo")
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[P_C]])
; CHECK-NEXT:    store volatile i64 0, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; CHECK-NEXT:    store volatile i64 2, i64* [[PTR]], align 8
; CHECK-NEXT:    br label [[COMMON_RET]]
;
  br i1 %c, label %true1, label %false1

true1:                                            ; preds = %false1, %0
  %vtablei8 = bitcast [3 x i8*]* %vtable to i8*
  %p = call i1 @llvm.type.test(i8* %vtablei8, metadata !"foo")
  tail call void @llvm.assume(i1 %p)
  store volatile i64 0, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  store volatile i64 -1, i64* %ptr, align 8
  br i1 %c, label %true2, label %false2

false1:                                           ; preds = %0
  store volatile i64 1, i64* %ptr, align 4
  br label %true1

true2:                                            ; preds = %true1
  store volatile i64 2, i64* %ptr, align 8
  ret void

false2:                                           ; preds = %true1
  store volatile i64 3, i64* %ptr, align 8
  ret void
}