; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S < %s | FileCheck %s
; RUN: opt -passes=instcombine -S < %s | FileCheck %s

target datalayout = "e-m:e-p:64:64:64-i64:64-f80:128-n8:16:32:64-S128-ni:1"

@X = constant i32 42 ; <i32*> [#uses=2]
@X2 = constant i32 47 ; <i32*> [#uses=1]
@Y = constant [2 x { i32, float }] [ { i32, float } { i32 12, float 1.000000e+00 }, { i32, float } { i32 37, float 0x3FF3B2FEC0000000 } ] ; <[2 x { i32, float }]*> [#uses=2]
@Z = constant [2 x { i32, float }] zeroinitializer ; <[2 x { i32, float }]*> [#uses=1]

@GLOBAL = internal constant [4 x i32] zeroinitializer
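
; Loads from constant globals fold to the values in their initializers
; (test1 through test4).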
define i32 @test1() {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    ret i32 42
;
  %B = load i32, i32* @X ; <i32> [#uses=1]
  ret i32 %B
}

define float @test2() {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    ret float 0x3FF3B2FEC0000000
;
  %A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Y, i64 0, i64 1, i32 1 ; <float*> [#uses=1]
  %B = load float, float* %A ; <float> [#uses=1]
  ret float %B
}

define i32 @test3() {
; CHECK-LABEL: @test3(
; CHECK-NEXT:    ret i32 12
;
  %A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Y, i64 0, i64 0, i32 0 ; <i32*> [#uses=1]
  %B = load i32, i32* %A ; <i32> [#uses=1]
  ret i32 %B
}

define i32 @test4() {
; CHECK-LABEL: @test4(
; CHECK-NEXT:    ret i32 0
;
  %A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Z, i64 0, i64 1, i32 0 ; <i32*> [#uses=1]
  %B = load i32, i32* %A ; <i32> [#uses=1]
  ret i32 %B
}
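
; A load of a select between two constant globals becomes a select of the
; loaded constants.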
define i32 @test5(i1 %C) {
; CHECK-LABEL: @test5(
; CHECK-NEXT:    [[Z:%.*]] = select i1 [[C:%.*]], i32 42, i32 47
; CHECK-NEXT:    ret i32 [[Z]]
;
  %Y = select i1 %C, i32* @X, i32* @X2 ; <i32*> [#uses=1]
  %Z = load i32, i32* %Y ; <i32> [#uses=1]
  ret i32 %Z
}
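
; A load from a GEP of null in address space 0 is UB, so it is replaced with
; poison whether or not the GEP is inbounds.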
define i32 @load_gep_null_inbounds(i64 %X) {
; CHECK-LABEL: @load_gep_null_inbounds(
; CHECK-NEXT:    store i32 poison, i32* null, align 536870912
; CHECK-NEXT:    ret i32 poison
;
  %V = getelementptr inbounds i32, i32* null, i64 %X
  %R = load i32, i32* %V
  ret i32 %R
}

define i32 @load_gep_null_not_inbounds(i64 %X) {
; CHECK-LABEL: @load_gep_null_not_inbounds(
; CHECK-NEXT:    store i32 poison, i32* null, align 536870912
; CHECK-NEXT:    ret i32 poison
;
  %V = getelementptr i32, i32* null, i64 %X
  %R = load i32, i32* %V
  ret i32 %R
}
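
; With the null_pointer_is_valid attribute, null may point at valid memory,
; so the load from a null-based address must be preserved.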
define i32 @test7_no_null_opt(i32 %X) #0 {
; CHECK-LABEL: @test7_no_null_opt(
; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[X:%.*]] to i64
; CHECK-NEXT:    [[V:%.*]] = getelementptr i32, i32* null, i64 [[TMP1]]
; CHECK-NEXT:    [[R:%.*]] = load i32, i32* [[V]], align 4
; CHECK-NEXT:    ret i32 [[R]]
;
  %V = getelementptr i32, i32* null, i32 %X ; <i32*> [#uses=1]
  %R = load i32, i32* %V ; <i32> [#uses=1]
  ret i32 %R
}
attributes #0 = { null_pointer_is_valid }
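
; Store-to-load forwarding: the load observes the value just stored to the
; same pointer.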
define i32 @test8(i32* %P) {
; CHECK-LABEL: @test8(
; CHECK-NEXT:    store i32 1, i32* [[P:%.*]], align 4
; CHECK-NEXT:    ret i32 1
;
  store i32 1, i32* %P
  %X = load i32, i32* %P ; <i32> [#uses=1]
  ret i32 %X
}
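
; Two loads of the same pointer with no intervening store must yield the same
; value, so the subtraction folds to 0.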
define i32 @test9(i32* %P) {
; CHECK-LABEL: @test9(
; CHECK-NEXT:    ret i32 0
;
  %X = load i32, i32* %P ; <i32> [#uses=1]
  %Y = load i32, i32* %P ; <i32> [#uses=1]
  %Z = sub i32 %X, %Y ; <i32> [#uses=1]
  ret i32 %Z
}
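
; Both arms of the diamond store 0 to %P, so that store is sunk into the join
; block and the loaded value folds to 0.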
define i32 @test10(i1 %C.upgrd.1, i32* %P, i32* %Q) {
; CHECK-LABEL: @test10(
; CHECK-NEXT:    br i1 [[C_UPGRD_1:%.*]], label [[T:%.*]], label [[F:%.*]]
; CHECK:       T:
; CHECK-NEXT:    store i32 1, i32* [[Q:%.*]], align 4
; CHECK-NEXT:    br label [[C:%.*]]
; CHECK:       F:
; CHECK-NEXT:    br label [[C]]
; CHECK:       C:
; CHECK-NEXT:    store i32 0, i32* [[P:%.*]], align 4
; CHECK-NEXT:    ret i32 0
;
  br i1 %C.upgrd.1, label %T, label %F
T:		; preds = %0
  store i32 1, i32* %Q
  store i32 0, i32* %P
  br label %C
F:		; preds = %0
  store i32 0, i32* %P
  br label %C
C:		; preds = %F, %T
  %V = load i32, i32* %P ; <i32> [#uses=1]
  ret i32 %V
}
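
; %t0 and %t1 compute the same address, so the stored 2.0 is forwarded to the
; load.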
define double @test11(double* %p) {
; CHECK-LABEL: @test11(
; CHECK-NEXT:    [[T0:%.*]] = getelementptr double, double* [[P:%.*]], i64 1
; CHECK-NEXT:    store double 2.000000e+00, double* [[T0]], align 8
; CHECK-NEXT:    ret double 2.000000e+00
;
  %t0 = getelementptr double, double* %p, i32 1
  store double 2.0, double* %t0
  %t1 = getelementptr double, double* %p, i32 1
  %x = load double, double* %t1
  ret double %x
}

define i32 @test12(i32* %P) {
; CHECK-LABEL: @test12(
; CHECK-NEXT:    ret i32 123
;
  %A = alloca i32
  store i32 123, i32* %A
  ; Cast the result of the load, not the source
  %Q = bitcast i32* %A to i32*
  %V = load i32, i32* %Q
  ret i32 %V
}

define <16 x i8> @test13(<2 x i64> %x) {
; CHECK-LABEL: @test13(
; CHECK-NEXT:    ret <16 x i8> zeroinitializer
;
  %tmp = load <16 x i8>, <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*)
  ret <16 x i8> %tmp
}

; This test must not have the store of %x forwarded to the load -- there is an
; intervening store of %y. However, the intervening store occurs with a different
; type and size and to a different pointer value. This is ensuring that none of
; those confuse the analysis into thinking that the second store does not alias
; the first.

define i8 @test14(i8 %x, i32 %y) {
; CHECK-LABEL: @test14(
; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[A_I8:%.*]] = bitcast i32* [[A]] to i8*
; CHECK-NEXT:    store i8 [[X:%.*]], i8* [[A_I8]], align 4
; CHECK-NEXT:    store i32 [[Y:%.*]], i32* [[A]], align 4
; CHECK-NEXT:    [[R:%.*]] = load i8, i8* [[A_I8]], align 4
; CHECK-NEXT:    ret i8 [[R]]
;
  %a = alloca i32
  %a.i8 = bitcast i32* %a to i8*
  store i8 %x, i8* %a.i8
  store i32 %y, i32* %a
  %r = load i8, i8* %a.i8
  ret i8 %r
}

@test15_global = external global i32

; Same test as @test14 essentially, but using a global instead of an alloca.

define i8 @test15(i8 %x, i32 %y) {
; CHECK-LABEL: @test15(
; CHECK-NEXT:    store i8 [[X:%.*]], i8* bitcast (i32* @test15_global to i8*), align 4
; CHECK-NEXT:    store i32 [[Y:%.*]], i32* @test15_global, align 4
; CHECK-NEXT:    [[R:%.*]] = load i8, i8* bitcast (i32* @test15_global to i8*), align 4
; CHECK-NEXT:    ret i8 [[R]]
;
  %g.i8 = bitcast i32* @test15_global to i8*
  store i8 %x, i8* %g.i8
  store i32 %y, i32* @test15_global
  %r = load i8, i8* %g.i8
  ret i8 %r
}

; Check that we canonicalize loads which are only stored to use integer types
; when there is a valid integer type.

define void @test16(i8* %x, i8* %a, i8* %b, i8* %c) {
; CHECK-LABEL: @test16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[X_CAST:%.*]] = bitcast i8* [[X:%.*]] to float*
; CHECK-NEXT:    [[A_CAST:%.*]] = bitcast i8* [[A:%.*]] to float*
; CHECK-NEXT:    [[B_CAST:%.*]] = bitcast i8* [[B:%.*]] to float*
; CHECK-NEXT:    [[X1:%.*]] = load float, float* [[X_CAST]], align 4
; CHECK-NEXT:    store float [[X1]], float* [[A_CAST]], align 4
; CHECK-NEXT:    store float [[X1]], float* [[B_CAST]], align 4
; CHECK-NEXT:    [[X2:%.*]] = load float, float* [[X_CAST]], align 4
; CHECK-NEXT:    store float [[X2]], float* [[B_CAST]], align 4
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[C:%.*]] to float*
; CHECK-NEXT:    store float [[X2]], float* [[TMP0]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %x.cast = bitcast i8* %x to float*
  %a.cast = bitcast i8* %a to float*
  %b.cast = bitcast i8* %b to float*
  %c.cast = bitcast i8* %c to i32*

  %x1 = load float, float* %x.cast
  store float %x1, float* %a.cast
  store float %x1, float* %b.cast

  %x2 = load float, float* %x.cast
  store float %x2, float* %b.cast
  %x2.cast = bitcast float %x2 to i32
  store i32 %x2.cast, i32* %c.cast

  ret void
}

define void @test16-vect(i8* %x, i8* %a, i8* %b, i8* %c) {
; CHECK-LABEL: @test16-vect(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[X_CAST:%.*]] = bitcast i8* [[X:%.*]] to <4 x i8>*
; CHECK-NEXT:    [[A_CAST:%.*]] = bitcast i8* [[A:%.*]] to <4 x i8>*
; CHECK-NEXT:    [[B_CAST:%.*]] = bitcast i8* [[B:%.*]] to <4 x i8>*
; CHECK-NEXT:    [[X1:%.*]] = load <4 x i8>, <4 x i8>* [[X_CAST]], align 4
; CHECK-NEXT:    store <4 x i8> [[X1]], <4 x i8>* [[A_CAST]], align 4
; CHECK-NEXT:    store <4 x i8> [[X1]], <4 x i8>* [[B_CAST]], align 4
; CHECK-NEXT:    [[X2:%.*]] = load <4 x i8>, <4 x i8>* [[X_CAST]], align 4
; CHECK-NEXT:    store <4 x i8> [[X2]], <4 x i8>* [[B_CAST]], align 4
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[C:%.*]] to <4 x i8>*
; CHECK-NEXT:    store <4 x i8> [[X2]], <4 x i8>* [[TMP0]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %x.cast = bitcast i8* %x to <4 x i8>*
  %a.cast = bitcast i8* %a to <4 x i8>*
  %b.cast = bitcast i8* %b to <4 x i8>*
  %c.cast = bitcast i8* %c to i32*

  %x1 = load <4 x i8>, <4 x i8>* %x.cast
  store <4 x i8> %x1, <4 x i8>* %a.cast
  store <4 x i8> %x1, <4 x i8>* %b.cast

  %x2 = load <4 x i8>, <4 x i8>* %x.cast
  store <4 x i8> %x2, <4 x i8>* %b.cast
  %x2.cast = bitcast <4 x i8> %x2 to i32
  store i32 %x2.cast, i32* %c.cast

  ret void
}

; Check that in cases similar to @test16 we don't try to rewrite a load when
; its only use is a store but it is used as the pointer to that store rather
; than the value.

define void @test17(i8** %x, i8 %y) {
; CHECK-LABEL: @test17(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[X_LOAD:%.*]] = load i8*, i8** [[X:%.*]], align 8
; CHECK-NEXT:    store i8 [[Y:%.*]], i8* [[X_LOAD]], align 1
; CHECK-NEXT:    ret void
;
entry:
  %x.load = load i8*, i8** %x
  store i8 %y, i8* %x.load
  ret void
}

; Check that we don't try to change the type of the load by inserting a
; bitcast, generating invalid IR.
%swift.error = type opaque
declare void @useSwiftError(%swift.error** swifterror)

define void @test18(%swift.error** swifterror %err) {
; CHECK-LABEL: @test18(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SWIFTERROR:%.*]] = alloca swifterror %swift.error*, align 8
; CHECK-NEXT:    store %swift.error* null, %swift.error** [[SWIFTERROR]], align 8
; CHECK-NEXT:    call void @useSwiftError(%swift.error** nonnull swifterror [[SWIFTERROR]])
; CHECK-NEXT:    [[ERR_RES:%.*]] = load %swift.error*, %swift.error** [[SWIFTERROR]], align 8
; CHECK-NEXT:    store %swift.error* [[ERR_RES]], %swift.error** [[ERR:%.*]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %swifterror = alloca swifterror %swift.error*, align 8
  store %swift.error* null, %swift.error** %swifterror, align 8
  call void @useSwiftError(%swift.error** nonnull swifterror %swifterror)
  %err.res = load %swift.error*, %swift.error** %swifterror, align 8
  store %swift.error* %err.res, %swift.error** %err, align 8
  ret void
}

; Make sure we preserve the type of the store to a swifterror pointer.

declare void @initi8(i8**)
define void @test19(%swift.error** swifterror %err) {
; CHECK-LABEL: @test19(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca i8*, align 8
; CHECK-NEXT:    call void @initi8(i8** nonnull [[TMP]])
; CHECK-NEXT:    [[SWIFTERROR:%.*]] = bitcast i8** [[TMP]] to %swift.error**
; CHECK-NEXT:    [[ERR_RES:%.*]] = load %swift.error*, %swift.error** [[SWIFTERROR]], align 8
; CHECK-NEXT:    store %swift.error* [[ERR_RES]], %swift.error** [[ERR:%.*]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca i8*, align 8
  call void @initi8(i8** %tmp)
  %swifterror = bitcast i8** %tmp to %swift.error**
  %err.res = load %swift.error*, %swift.error** %swifterror, align 8
  store %swift.error* %err.res, %swift.error** %err, align 8
  ret void
}

; Make sure we don't canonicalize accesses to scalable vectors.
define void @test20(<vscale x 4 x i8>* %x, <vscale x 4 x i8>* %y) {
; CHECK-LABEL: @test20(
; CHECK-NEXT:    [[X_LOAD:%.*]] = load <vscale x 4 x i8>, <vscale x 4 x i8>* [[X:%.*]], align 1
; CHECK-NEXT:    store <vscale x 4 x i8> [[X_LOAD]], <vscale x 4 x i8>* [[Y:%.*]], align 1
; CHECK-NEXT:    ret void
;
  %x.load = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x, align 1
  store <vscale x 4 x i8> %x.load, <vscale x 4 x i8>* %y, align 1
  ret void
}

; Check that non-integral pointers are not converted using inttoptr

declare void @use(i8*)
declare void @use.p1(i8 addrspace(1)*)
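
; The datalayout marks address space 1 as non-integral (ni:1): in test21 the
; addrspace(0) pointer load is rewritten as an inttoptr of the i64 load, while
; in test22 the separate addrspace(1) load must remain.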
define i64 @test21(i64* %P) {
; CHECK-LABEL: @test21(
; CHECK-NEXT:    [[X:%.*]] = load i64, i64* [[P:%.*]], align 8
; CHECK-NEXT:    [[Y_CAST:%.*]] = inttoptr i64 [[X]] to i8*
; CHECK-NEXT:    call void @use(i8* [[Y_CAST]])
; CHECK-NEXT:    ret i64 [[X]]
;
  %P.ptr = bitcast i64* %P to i8**
  %X = load i64, i64* %P
  %Y = load i8*, i8** %P.ptr
  call void @use(i8* %Y)
  ret i64 %X
}

define i64 @test22(i64* %P) {
; CHECK-LABEL: @test22(
; CHECK-NEXT:    [[P_PTR:%.*]] = bitcast i64* [[P:%.*]] to i8 addrspace(1)**
; CHECK-NEXT:    [[X:%.*]] = load i64, i64* [[P]], align 8
; CHECK-NEXT:    [[Y:%.*]] = load i8 addrspace(1)*, i8 addrspace(1)** [[P_PTR]], align 8
; CHECK-NEXT:    call void @use.p1(i8 addrspace(1)* [[Y]])
; CHECK-NEXT:    ret i64 [[X]]
;
  %P.ptr = bitcast i64* %P to i8 addrspace(1)**
  %X = load i64, i64* %P
  %Y = load i8 addrspace(1)*, i8 addrspace(1)** %P.ptr
  call void @use.p1(i8 addrspace(1)* %Y)
  ret i64 %X
}

declare void @use.v2.p0(<2 x i8*>)
declare void @use.v2.p1(<2 x i8 addrspace(1)*>)
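
; Vector-of-pointer loads are left alone in both cases: neither the integral
; <2 x i8*> load (test23) nor the non-integral <2 x i8 addrspace(1)*> load
; (test24) is rewritten.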
define <2 x i64> @test23(<2 x i64>* %P) {
; CHECK-LABEL: @test23(
; CHECK-NEXT:    [[P_PTR:%.*]] = bitcast <2 x i64>* [[P:%.*]] to <2 x i8*>*
; CHECK-NEXT:    [[X:%.*]] = load <2 x i64>, <2 x i64>* [[P]], align 16
; CHECK-NEXT:    [[Y:%.*]] = load <2 x i8*>, <2 x i8*>* [[P_PTR]], align 16
; CHECK-NEXT:    call void @use.v2.p0(<2 x i8*> [[Y]])
; CHECK-NEXT:    ret <2 x i64> [[X]]
;
  %P.ptr = bitcast <2 x i64>* %P to <2 x i8*>*
  %X = load <2 x i64>, <2 x i64>* %P
  %Y = load <2 x i8*>, <2 x i8*>* %P.ptr
  call void @use.v2.p0(<2 x i8*> %Y)
  ret <2 x i64> %X
}

define <2 x i64> @test24(<2 x i64>* %P) {
; CHECK-LABEL: @test24(
; CHECK-NEXT:    [[P_PTR:%.*]] = bitcast <2 x i64>* [[P:%.*]] to <2 x i8 addrspace(1)*>*
; CHECK-NEXT:    [[X:%.*]] = load <2 x i64>, <2 x i64>* [[P]], align 16
; CHECK-NEXT:    [[Y:%.*]] = load <2 x i8 addrspace(1)*>, <2 x i8 addrspace(1)*>* [[P_PTR]], align 16
; CHECK-NEXT:    call void @use.v2.p1(<2 x i8 addrspace(1)*> [[Y]])
; CHECK-NEXT:    ret <2 x i64> [[X]]
;
  %P.ptr = bitcast <2 x i64>* %P to <2 x i8 addrspace(1)*>*
  %X = load <2 x i64>, <2 x i64>* %P
  %Y = load <2 x i8 addrspace(1)*>, <2 x i8 addrspace(1)*>* %P.ptr
  call void @use.v2.p1(<2 x i8 addrspace(1)*> %Y)
  ret <2 x i64> %X
}