1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -passes=dse -S | FileCheck %s
4 @BUFFER = external local_unnamed_addr global [0 x i8], align 1
6 define void @ArrayTestFullyOverlapping(i64 %0) {
8 ; The DSE pass will try to kill the store of size i32 using the store of
9 ; size i64 because they fully overlap, in fact:
11 ; - they use the same base pointer (in SCEV style '@BUFFER + %0')
12 ; - the offset between the two stores is 32 bits
13 ; - the size of the earlier store is 32 bits
14 ; - the size of the later store is 64 bits
; The autogenerated FileCheck assertions below describe the expected output
; after DSE: only the i64 store survives, i.e. the fully-overlapped i32
; store has been eliminated.
16 ; CHECK-LABEL: @ArrayTestFullyOverlapping(
17 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP0:%.*]], -8
18 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 [[TMP2]]
19 ; CHECK-NEXT: store i64 0, ptr [[TMP3]], align 4
20 ; CHECK-NEXT: ret void
; NOTE(review): this excerpt appears truncated -- the 'add' instructions
; defining %2/%4, both 'store's, 'ret void' and the closing brace of this
; function are not visible here; only the two GEPs of the body remain.
23 %3 = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 %2
25 %5 = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 %4
; Fully-overlapping fixed-width vector case: the earlier scalar float store
; at index %i+1 is completely covered by the later <2 x float> store at
; index %i, so DSE removes the scalar store -- the expected output below
; contains only the vector store.
31 define void @VectorTestFullyOverlapping(ptr %arg, i32 %i) {
32 ; CHECK-LABEL: @VectorTestFullyOverlapping(
34 ; CHECK-NEXT: [[I2:%.*]] = zext i32 [[I:%.*]] to i64
35 ; CHECK-NEXT: [[I3:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[I2]]
36 ; CHECK-NEXT: store <2 x float> zeroinitializer, ptr [[I3]], align 16
37 ; CHECK-NEXT: ret void
40 %i7 = add nuw nsw i32 %i, 1
41 %i8 = zext i32 %i7 to i64
42 %i9 = getelementptr inbounds float, ptr %arg, i64 %i8
43 store float 0.0, ptr %i9, align 4
44 %i2 = zext i32 %i to i64
45 %i3 = getelementptr inbounds float, ptr %arg, i64 %i2
46 store <2 x float> <float 0.0, float 0.0>, ptr %i3, align 16
; NOTE(review): 'ret void' and the closing brace of this function are not
; visible in this excerpt.
; Scalable-vector variant of the fully-overlapping case. With
; vscale_range(1, 2) the <vscale x 2 x float> store at %i spans at least two
; floats, which would cover the scalar store at %i+1; the expected output
; below nevertheless retains BOTH stores.
; NOTE(review): presumably DSE does not use the vscale range to prove full
; overlap for stores at different offsets -- confirm against the pass.
50 define void @ScalableVectorTestFullyOverlapping(ptr %arg, i32 %i) vscale_range(1, 2) {
51 ; CHECK-LABEL: @ScalableVectorTestFullyOverlapping(
52 ; CHECK-NEXT: [[I_1:%.*]] = add nuw nsw i32 [[I:%.*]], 1
53 ; CHECK-NEXT: [[EXT_I_1:%.*]] = zext i32 [[I_1]] to i64
54 ; CHECK-NEXT: [[GEP_ARG_I_1:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[EXT_I_1]]
55 ; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_ARG_I_1]], align 4
56 ; CHECK-NEXT: [[EXT_I:%.*]] = zext i32 [[I]] to i64
57 ; CHECK-NEXT: [[GEP_ARG_I:%.*]] = getelementptr inbounds float, ptr [[ARG]], i64 [[EXT_I]]
58 ; CHECK-NEXT: store <vscale x 2 x float> zeroinitializer, ptr [[GEP_ARG_I]], align 8
59 ; CHECK-NEXT: ret void
61 %i.1 = add nuw nsw i32 %i, 1
62 %ext.i.1 = zext i32 %i.1 to i64
63 %gep.arg.i.1 = getelementptr inbounds float, ptr %arg, i64 %ext.i.1
64 store float 0.0, ptr %gep.arg.i.1
65 %ext.i = zext i32 %i to i64
66 %gep.arg.i = getelementptr inbounds float, ptr %arg, i64 %ext.i
67 store <vscale x 2 x float> zeroinitializer, ptr %gep.arg.i
; NOTE(review): 'ret void' and the closing brace of this function are not
; visible in this excerpt.
; Two scalable stores: <vscale x 2 x float> at %i+1 followed by
; <vscale x 4 x float> at %i. No vscale_range is attached, so the runtime
; sizes of the stores are unbounded; the expected output below keeps both
; stores (DSE cannot prove the later store fully covers the earlier one).
71 define void @ScalableVectorTestFullyOverlapping2(ptr %arg, i32 %i) {
72 ; CHECK-LABEL: @ScalableVectorTestFullyOverlapping2(
73 ; CHECK-NEXT: [[I_1:%.*]] = add nuw nsw i32 [[I:%.*]], 1
74 ; CHECK-NEXT: [[EXT_I_1:%.*]] = zext i32 [[I_1]] to i64
75 ; CHECK-NEXT: [[GEP_ARG_I_1:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[EXT_I_1]]
76 ; CHECK-NEXT: store <vscale x 2 x float> zeroinitializer, ptr [[GEP_ARG_I_1]], align 8
77 ; CHECK-NEXT: [[EXT_I:%.*]] = zext i32 [[I]] to i64
78 ; CHECK-NEXT: [[GEP_ARG_I:%.*]] = getelementptr inbounds float, ptr [[ARG]], i64 [[EXT_I]]
79 ; CHECK-NEXT: store <vscale x 4 x float> zeroinitializer, ptr [[GEP_ARG_I]], align 16
80 ; CHECK-NEXT: ret void
82 %i.1 = add nuw nsw i32 %i, 1
83 %ext.i.1 = zext i32 %i.1 to i64
84 %gep.arg.i.1 = getelementptr inbounds float, ptr %arg, i64 %ext.i.1
85 store <vscale x 2 x float> zeroinitializer, ptr %gep.arg.i.1
86 %ext.i = zext i32 %i to i64
87 %gep.arg.i = getelementptr inbounds float, ptr %arg, i64 %ext.i
88 store <vscale x 4 x float> zeroinitializer, ptr %gep.arg.i
; NOTE(review): 'ret void' and the closing brace of this function are not
; visible in this excerpt.
; Non-overlapping case: scalar float store at %i+10 versus
; <vscale x 2 x float> store at %i. With vscale_range(1, 2) the scalable
; store covers at most 4 floats starting at %i, so it can never reach
; %i+10; both stores are correctly kept in the expected output below.
92 define void @ScalableVectorTestNonOverlapping(ptr %arg, i32 %i) vscale_range(1, 2) {
93 ; CHECK-LABEL: @ScalableVectorTestNonOverlapping(
94 ; CHECK-NEXT: [[I_10:%.*]] = add nuw nsw i32 [[I:%.*]], 10
95 ; CHECK-NEXT: [[EXT_I_10:%.*]] = zext i32 [[I_10]] to i64
96 ; CHECK-NEXT: [[GEP_ARG_I_10:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[EXT_I_10]]
97 ; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_ARG_I_10]], align 4
98 ; CHECK-NEXT: [[EXT_I:%.*]] = zext i32 [[I]] to i64
99 ; CHECK-NEXT: [[GEP_ARG_I:%.*]] = getelementptr inbounds float, ptr [[ARG]], i64 [[EXT_I]]
100 ; CHECK-NEXT: store <vscale x 2 x float> zeroinitializer, ptr [[GEP_ARG_I]], align 8
101 ; CHECK-NEXT: ret void
103 %i.10 = add nuw nsw i32 %i, 10
104 %ext.i.10 = zext i32 %i.10 to i64
105 %gep.arg.i.10 = getelementptr inbounds float, ptr %arg, i64 %ext.i.10
106 store float 0.0, ptr %gep.arg.i.10
107 %ext.i = zext i32 %i to i64
108 %gep.arg.i = getelementptr inbounds float, ptr %arg, i64 %ext.i
109 store <vscale x 2 x float> zeroinitializer, ptr %gep.arg.i
; NOTE(review): 'ret void' and the closing brace of this function are not
; visible in this excerpt.
; Non-overlapping case with two scalable stores: <vscale x 2 x float> at
; %i+10 versus <vscale x 4 x float> at %i. With vscale_range(1, 2) the
; later store covers at most 8 floats starting at %i, which ends before
; %i+10; both stores are correctly kept in the expected output below.
113 define void @ScalableVectorTestNonOverlapping2(ptr %arg, i32 %i) vscale_range(1, 2) {
114 ; CHECK-LABEL: @ScalableVectorTestNonOverlapping2(
115 ; CHECK-NEXT: [[I_10:%.*]] = add nuw nsw i32 [[I:%.*]], 10
116 ; CHECK-NEXT: [[EXT_I_10:%.*]] = zext i32 [[I_10]] to i64
117 ; CHECK-NEXT: [[GEP_ARG_I_10:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[EXT_I_10]]
118 ; CHECK-NEXT: store <vscale x 2 x float> zeroinitializer, ptr [[GEP_ARG_I_10]], align 8
119 ; CHECK-NEXT: [[EXT_I:%.*]] = zext i32 [[I]] to i64
120 ; CHECK-NEXT: [[GEP_ARG_I:%.*]] = getelementptr inbounds float, ptr [[ARG]], i64 [[EXT_I]]
121 ; CHECK-NEXT: store <vscale x 4 x float> zeroinitializer, ptr [[GEP_ARG_I]], align 16
122 ; CHECK-NEXT: ret void
124 %i.10 = add nuw nsw i32 %i, 10
125 %ext.i.10 = zext i32 %i.10 to i64
126 %gep.arg.i.10 = getelementptr inbounds float, ptr %arg, i64 %ext.i.10
127 store <vscale x 2 x float> zeroinitializer, ptr %gep.arg.i.10
128 %ext.i = zext i32 %i to i64
129 %gep.arg.i = getelementptr inbounds float, ptr %arg, i64 %ext.i
130 store <vscale x 4 x float> zeroinitializer, ptr %gep.arg.i
; NOTE(review): 'ret void' and the closing brace of this function are not
; visible in this excerpt.
134 define void @ArrayTestPartiallyOverlapping(i64 %0) {
136 ; The DSE pass will not kill the store because the overlap is partial
137 ; and won't fully clobber the i32 store.
; The expected output below keeps both the i32 store (at %0+15) and the
; i64 store (at %0+10): an 8-byte store at offset 10 only partially covers
; a 4-byte store at offset 15.
139 ; CHECK-LABEL: @ArrayTestPartiallyOverlapping(
140 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP0:%.*]], 10
141 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 [[TMP2]]
142 ; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP0]], 15
143 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 [[TMP4]]
144 ; CHECK-NEXT: store i32 1, ptr [[TMP5]], align 4
145 ; CHECK-NEXT: store i64 0, ptr [[TMP3]], align 4
146 ; CHECK-NEXT: ret void
; NOTE(review): this excerpt appears truncated -- the 'add' instructions
; defining %2/%4, both 'store's, 'ret void' and the closing brace of this
; function are not visible here; only the two GEPs of the body remain.
149 %3 = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 %2
151 %5 = getelementptr inbounds [0 x i8], ptr @BUFFER, i64 0, i64 %4
157 define void @VectorTestPartiallyOverlapping(ptr %arg, i32 %i) {
159 ; The DSE pass will not kill the store because the overlap is partial
160 ; and won't fully clobber the original store.
; Two <2 x float> stores at indices %i and %i+1: the second overlaps only
; the upper element of the first, so the expected output below keeps both.
162 ; CHECK-LABEL: @VectorTestPartiallyOverlapping(
164 ; CHECK-NEXT: [[I2:%.*]] = zext i32 [[I:%.*]] to i64
165 ; CHECK-NEXT: [[I3:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[I2]]
166 ; CHECK-NEXT: store <2 x float> splat (float 1.000000e+00), ptr [[I3]], align 16
167 ; CHECK-NEXT: [[I5:%.*]] = add nuw nsw i32 [[I]], 1
168 ; CHECK-NEXT: [[I6:%.*]] = zext i32 [[I5]] to i64
169 ; CHECK-NEXT: [[I7:%.*]] = getelementptr inbounds float, ptr [[ARG]], i64 [[I6]]
170 ; CHECK-NEXT: store <2 x float> zeroinitializer, ptr [[I7]], align 16
171 ; CHECK-NEXT: ret void
174 %i2 = zext i32 %i to i64
175 %i3 = getelementptr inbounds float, ptr %arg, i64 %i2
176 store <2 x float> <float 1.000000e+00, float 1.000000e+00>, ptr %i3, align 16
177 %i5 = add nuw nsw i32 %i, 1
178 %i6 = zext i32 %i5 to i64
179 %i7 = getelementptr inbounds float, ptr %arg, i64 %i6
180 store <2 x float> <float 0.0, float 0.0>, ptr %i7, align 16
; NOTE(review): 'ret void' and the closing brace of this function are not
; visible in this excerpt.
184 define void @ScalableVectorTestPartiallyOverlapping(ptr %arg, i32 %i) {
186 ; The DSE pass will not kill the store because the overlap is partial
187 ; and won't fully clobber the original store.
189 ; CHECK-LABEL: @ScalableVectorTestPartiallyOverlapping(
190 ; CHECK-NEXT: [[EXT_I:%.*]] = zext i32 [[I:%.*]] to i64
191 ; CHECK-NEXT: [[GEP_ARG_I:%.*]] = getelementptr inbounds float, ptr [[ARG:%.*]], i64 [[EXT_I]]
192 ; CHECK-NEXT: store <vscale x 2 x float> zeroinitializer, ptr [[GEP_ARG_I]], align 8
193 ; CHECK-NEXT: [[I_1:%.*]] = add nuw nsw i32 [[I]], 1
194 ; CHECK-NEXT: [[EXT_I_1:%.*]] = zext i32 [[I_1]] to i64
195 ; CHECK-NEXT: [[GEP_ARG_I_1:%.*]] = getelementptr inbounds float, ptr [[ARG]], i64 [[EXT_I_1]]
196 ; CHECK-NEXT: store <vscale x 2 x float> zeroinitializer, ptr [[GEP_ARG_I_1]], align 8
197 ; CHECK-NEXT: ret void
199 %ext.i = zext i32 %i to i64
200 %gep.arg.i = getelementptr inbounds float, ptr %arg, i64 %ext.i
201 store <vscale x 2 x float> zeroinitializer, ptr %gep.arg.i
202 %i.1 = add nuw nsw i32 %i, 1
203 %ext.i.1 = zext i32 %i.1 to i64
204 %gep.arg.i.1 = getelementptr inbounds float, ptr %arg, i64 %ext.i.1
205 store <vscale x 2 x float> zeroinitializer, ptr %gep.arg.i.1