; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

define void @test1(i32* %P) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: store i32 123, i32* undef, align 4
; CHECK-NEXT: store i32 poison, i32* null, align 536870912
; CHECK-NEXT: ret void
;
  store i32 undef, i32* %P
  store i32 123, i32* undef
  store i32 124, i32* null
  ret void
}

define void @test2(i32* %P) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: ret void
;
  %X = load i32, i32* %P
  %Y = add i32 %X, 0
  store i32 %Y, i32* %P
  ret void
}
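
; Storing through a pointer derived from null is immediate UB when null is not
; a valid address, so the stored value is folded to poison; with the
; null_pointer_is_valid attribute (see @store_at_gep_off_no_null_opt) the
; store is left alone.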

define void @store_at_gep_off_null_inbounds(i64 %offset) {
; CHECK-LABEL: @store_at_gep_off_null_inbounds(
; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, i32* null, i64 [[OFFSET:%.*]]
; CHECK-NEXT: store i32 poison, i32* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %ptr = getelementptr inbounds i32, i32* null, i64 %offset
  store i32 24, i32* %ptr
  ret void
}

define void @store_at_gep_off_null_not_inbounds(i64 %offset) {
; CHECK-LABEL: @store_at_gep_off_null_not_inbounds(
; CHECK-NEXT: [[PTR:%.*]] = getelementptr i32, i32* null, i64 [[OFFSET:%.*]]
; CHECK-NEXT: store i32 poison, i32* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %ptr = getelementptr i32, i32* null, i64 %offset
  store i32 24, i32* %ptr
  ret void
}

define void @store_at_gep_off_no_null_opt(i64 %offset) #0 {
; CHECK-LABEL: @store_at_gep_off_no_null_opt(
; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i32, i32* null, i64 [[OFFSET:%.*]]
; CHECK-NEXT: store i32 24, i32* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %ptr = getelementptr inbounds i32, i32* null, i64 %offset
  store i32 24, i32* %ptr
  ret void
}

attributes #0 = { null_pointer_is_valid }

;; Simple sinking tests
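;; A store in each predecessor of a common successor is sunk into the
;; successor and the stored values are merged through a phi, roughly:
;;
;;   Cond:   store i32 1, i32* %p          Cont:
;;   Cond2:  store i32 2, i32* %p    ==>     %storemerge = phi i32 [ 1, %Cond ], [ 2, %Cond2 ]
;;                                           store i32 %storemerge, i32* %p
;;
;; (illustrative values and names only; the tests below check the actual output)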

define i32 @test3(i1 %C) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: br i1 [[C:%.*]], label [[COND:%.*]], label [[COND2:%.*]]
; CHECK: Cond:
; CHECK-NEXT: br label [[CONT:%.*]]
; CHECK: Cond2:
; CHECK-NEXT: br label [[CONT]]
; CHECK: Cont:
; CHECK-NEXT: [[STOREMERGE:%.*]] = phi i32 [ 47, [[COND2]] ], [ -987654321, [[COND]] ]
; CHECK-NEXT: ret i32 [[STOREMERGE]]
;
  %A = alloca i32
  br i1 %C, label %Cond, label %Cond2

Cond:
  store i32 -987654321, i32* %A
  br label %Cont

Cond2:
  store i32 47, i32* %A
  br label %Cont

Cont:
  %V = load i32, i32* %A
  ret i32 %V
}

define i32 @test4(i1 %C) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: br i1 [[C:%.*]], label [[COND:%.*]], label [[CONT:%.*]]
; CHECK: Cond:
; CHECK-NEXT: br label [[CONT]]
; CHECK: Cont:
; CHECK-NEXT: [[STOREMERGE:%.*]] = phi i32 [ -987654321, [[COND]] ], [ 47, [[TMP0:%.*]] ]
; CHECK-NEXT: ret i32 [[STOREMERGE]]
;
  %A = alloca i32
  store i32 47, i32* %A
  br i1 %C, label %Cond, label %Cont

Cond:
  store i32 -987654321, i32* %A
  br label %Cont

Cont:
  %V = load i32, i32* %A
  ret i32 %V
}

define void @test5(i1 %C, i32* %P) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: br i1 [[C:%.*]], label [[COND:%.*]], label [[CONT:%.*]]
; CHECK: Cond:
; CHECK-NEXT: br label [[CONT]]
; CHECK: Cont:
; CHECK-NEXT: [[STOREMERGE:%.*]] = phi i32 [ -987654321, [[COND]] ], [ 47, [[TMP0:%.*]] ]
; CHECK-NEXT: store i32 [[STOREMERGE]], i32* [[P:%.*]], align 1
; CHECK-NEXT: ret void
;
  store i32 47, i32* %P, align 1
  br i1 %C, label %Cond, label %Cont

Cond:
  store i32 -987654321, i32* %P, align 1
  br label %Cont

Cont:
  ret void
}

; PR14753 - merging two stores should preserve the TBAA tag.
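; The merged store of the phi must keep the integer access tag (!tbaa !0) from
; the stores it replaces.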
define void @test6(i32 %n, float* %a, i32* %gi) nounwind uwtable ssp {
; CHECK-LABEL: @test6(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[STOREMERGE:%.*]] = phi i32 [ 42, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT: store i32 [[STOREMERGE]], i32* [[GI:%.*]], align 4, !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[STOREMERGE]], [[N:%.*]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[STOREMERGE]] to i64
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[IDXPROM]]
; CHECK-NEXT: store float 0.000000e+00, float* [[ARRAYIDX]], align 4, !tbaa [[TBAA4:![0-9]+]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[GI]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[INC]] = add nsw i32 [[TMP0]], 1
; CHECK-NEXT: br label [[FOR_COND]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  store i32 42, i32* %gi, align 4, !tbaa !0
  br label %for.cond

for.cond:
  %storemerge = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %0 = load i32, i32* %gi, align 4, !tbaa !0
  %cmp = icmp slt i32 %0, %n
  br i1 %cmp, label %for.body, label %for.end

for.body:
  %idxprom = sext i32 %0 to i64
  %arrayidx = getelementptr inbounds float, float* %a, i64 %idxprom
  store float 0.000000e+00, float* %arrayidx, align 4, !tbaa !3
  %1 = load i32, i32* %gi, align 4, !tbaa !0
  %inc = add nsw i32 %1, 1
  store i32 %inc, i32* %gi, align 4, !tbaa !0
  br label %for.cond

for.end:
  ret void
}
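
; The first of two back-to-back stores to the same location is dead and is
; removed.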
define void @dse1(i32* %p) {
; CHECK-LABEL: @dse1(
; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
; CHECK-NEXT: ret void
;
  store i32 0, i32* %p
  store i32 0, i32* %p
  ret void
}

; Slightly subtle: if we're mixing atomic and non-atomic access to the
; same location, then the contents of the location are undefined if there's
; an actual race. As such, we're free to pick either store under the
; assumption that we're not racing with any other thread.
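; For @dse2 and @dse3 below, keeping either the atomic store or the plain
; store would be a legal result; the CHECK lines only pin down the choice
; InstCombine currently makes.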
define void @dse2(i32* %p) {
; CHECK-LABEL: @dse2(
; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
; CHECK-NEXT: ret void
;
  store atomic i32 0, i32* %p unordered, align 4
  store i32 0, i32* %p
  ret void
}

define void @dse3(i32* %p) {
; CHECK-LABEL: @dse3(
; CHECK-NEXT: store atomic i32 0, i32* [[P:%.*]] unordered, align 4
; CHECK-NEXT: ret void
;
  store i32 0, i32* %p
  store atomic i32 0, i32* %p unordered, align 4
  ret void
}

define void @dse4(i32* %p) {
; CHECK-LABEL: @dse4(
; CHECK-NEXT: store atomic i32 0, i32* [[P:%.*]] unordered, align 4
; CHECK-NEXT: ret void
;
  store atomic i32 0, i32* %p unordered, align 4
  store atomic i32 0, i32* %p unordered, align 4
  ret void
}

; Implementation limit - could remove unordered store here, but
; currently don't.
define void @dse5(i32* %p) {
; CHECK-LABEL: @dse5(
; CHECK-NEXT: store atomic i32 0, i32* [[P:%.*]] unordered, align 4
; CHECK-NEXT: store atomic i32 0, i32* [[P]] seq_cst, align 4
; CHECK-NEXT: ret void
;
  store atomic i32 0, i32* %p unordered, align 4
  store atomic i32 0, i32* %p seq_cst, align 4
  ret void
}
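
; Write-back tests: a store of the value just loaded from the same location is
; a no-op and can be removed, provided removing it does not drop atomicity or
; ordering requirements.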

define void @write_back1(i32* %p) {
; CHECK-LABEL: @write_back1(
; CHECK-NEXT: ret void
;
  %v = load i32, i32* %p
  store i32 %v, i32* %p
  ret void
}

define void @write_back2(i32* %p) {
; CHECK-LABEL: @write_back2(
; CHECK-NEXT: ret void
;
  %v = load atomic i32, i32* %p unordered, align 4
  store i32 %v, i32* %p
  ret void
}

define void @write_back3(i32* %p) {
; CHECK-LABEL: @write_back3(
; CHECK-NEXT: ret void
;
  %v = load i32, i32* %p
  store atomic i32 %v, i32* %p unordered, align 4
  ret void
}

define void @write_back4(i32* %p) {
; CHECK-LABEL: @write_back4(
; CHECK-NEXT: ret void
;
  %v = load atomic i32, i32* %p unordered, align 4
  store atomic i32 %v, i32* %p unordered, align 4
  ret void
}

; Can't remove store due to ordering side effect
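; (a seq_cst store orders other memory operations, so it must stay even though
; it only writes back the value just loaded).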
define void @write_back5(i32* %p) {
; CHECK-LABEL: @write_back5(
; CHECK-NEXT: [[V:%.*]] = load atomic i32, i32* [[P:%.*]] unordered, align 4
; CHECK-NEXT: store atomic i32 [[V]], i32* [[P]] seq_cst, align 4
; CHECK-NEXT: ret void
;
  %v = load atomic i32, i32* %p unordered, align 4
  store atomic i32 %v, i32* %p seq_cst, align 4
  ret void
}
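
; The unordered write-back store can be removed, but the seq_cst load must be
; kept.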
define void @write_back6(i32* %p) {
; CHECK-LABEL: @write_back6(
; CHECK-NEXT: [[V:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4
; CHECK-NEXT: ret void
;
  %v = load atomic i32, i32* %p seq_cst, align 4
  store atomic i32 %v, i32* %p unordered, align 4
  ret void
}

define void @write_back7(i32* %p) {
; CHECK-LABEL: @write_back7(
; CHECK-NEXT: [[V:%.*]] = load atomic volatile i32, i32* [[P:%.*]] seq_cst, align 4
; CHECK-NEXT: ret void
;
  %v = load atomic volatile i32, i32* %p seq_cst, align 4
  store atomic i32 %v, i32* %p unordered, align 4
  ret void
}

@Unknown = external constant i32
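
; Writes to memory known to be constant are UB, so the store can be deleted.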
define void @store_to_constant() {
; CHECK-LABEL: @store_to_constant(
; CHECK-NEXT: ret void
;
  store i32 0, i32* @Unknown
  ret void
}

!0 = !{!4, !4, i64 0}
!1 = !{!"omnipotent char", !2}
!2 = !{!"Simple C/C++ TBAA"}
!3 = !{!5, !5, i64 0}
!4 = !{!"int", !1}
!5 = !{!"float", !1}