; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -sroa -S | FileCheck %s
; RUN: opt < %s -passes=sroa -S | FileCheck %s
target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
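; For reference: the i64 argument to these lifetime intrinsics is the size in
; bytes of the memory region whose lifetime is starting or ending, and the
; pointer argument identifies that region (see the uses in @test0 below).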
; CHECK-LABEL: @test0(
; CHECK-NEXT: [[V2_INT:%.*]] = bitcast float 0.000000e+00 to i32
; CHECK-NEXT: [[SUM1:%.*]] = add i32 0, [[V2_INT]]
; CHECK-NEXT: ret i32 [[SUM1]]
%a1.i8 = bitcast i32* %a1 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* %a1.i8)
%v1 = load i32, i32* %a1
call void @llvm.lifetime.end.p0i8(i64 4, i8* %a1.i8)
%a2.i8 = bitcast float* %a2 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* %a2.i8)
store float 0.0, float* %a2
%v2 = load float, float* %a2
%v2.int = bitcast float %v2 to i32
%sum1 = add i32 %v1, %v2.int
call void @llvm.lifetime.end.p0i8(i64 4, i8* %a2.i8)
; CHECK-LABEL: @test1(
; CHECK-NEXT: ret i32 0
%X = alloca { i32, float }
%Y = getelementptr { i32, float }, { i32, float }* %X, i64 0, i32 0
%Z = load i32, i32* %Y
define i64 @test2(i64 %X) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: br label [[L2:%.*]]
; CHECK-NEXT: ret i64 [[X:%.*]]
%B = bitcast [8 x i8]* %A to i64*
%Z = load i64, i64* %B
define i64 @test2_addrspacecast(i64 %X) {
; CHECK-LABEL: @test2_addrspacecast(
; CHECK-NEXT: br label [[L2:%.*]]
; CHECK-NEXT: ret i64 [[X:%.*]]
%B = addrspacecast [8 x i8]* %A to i64 addrspace(1)*
store i64 %X, i64 addrspace(1)* %B
%Z = load i64, i64 addrspace(1)* %B
define i64 @test2_addrspacecast_gep(i64 %X, i16 %idx) {
; CHECK-LABEL: @test2_addrspacecast_gep(
; CHECK-NEXT: br label [[L2:%.*]]
; CHECK-NEXT: ret i64 [[X:%.*]]
%A = alloca [256 x i8]
%B = addrspacecast [256 x i8]* %A to i64 addrspace(1)*
%gepA = getelementptr [256 x i8], [256 x i8]* %A, i16 0, i16 32
%gepB = getelementptr i64, i64 addrspace(1)* %B, i16 4
store i64 %X, i64 addrspace(1)* %gepB, align 1
%gepA.bc = bitcast i8* %gepA to i64*
%Z = load i64, i64* %gepA.bc, align 1
; Avoid crashing when loading/storing at different offsets.
define i64 @test2_addrspacecast_gep_offset(i64 %X) {
; CHECK-LABEL: @test2_addrspacecast_gep_offset(
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [10 x i8], align 1
; CHECK-NEXT: [[A_SROA_0_2_GEPB_SROA_IDX:%.*]] = getelementptr inbounds [10 x i8], [10 x i8]* [[A_SROA_0]], i16 0, i16 2
; CHECK-NEXT: [[A_SROA_0_2_GEPB_SROA_CAST:%.*]] = addrspacecast i8* [[A_SROA_0_2_GEPB_SROA_IDX]] to i64 addrspace(1)*
; CHECK-NEXT: store i64 [[X:%.*]], i64 addrspace(1)* [[A_SROA_0_2_GEPB_SROA_CAST]], align 1
; CHECK-NEXT: br label [[L2:%.*]]
; CHECK-NEXT: [[A_SROA_0_0_GEPA_BC_SROA_CAST:%.*]] = bitcast [10 x i8]* [[A_SROA_0]] to i64*
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_30_Z:%.*]] = load i64, i64* [[A_SROA_0_0_GEPA_BC_SROA_CAST]], align 1
; CHECK-NEXT: ret i64 [[A_SROA_0_0_A_SROA_0_30_Z]]
%A = alloca [256 x i8]
%B = addrspacecast [256 x i8]* %A to i64 addrspace(1)*
%gepA = getelementptr [256 x i8], [256 x i8]* %A, i16 0, i16 30
%gepB = getelementptr i64, i64 addrspace(1)* %B, i16 4
store i64 %X, i64 addrspace(1)* %gepB, align 1
%gepA.bc = bitcast i8* %gepA to i64*
%Z = load i64, i64* %gepA.bc, align 1
define void @test3(i8* %dst, i8* align 8 %src) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [42 x i8], align 1
; CHECK-NEXT: [[A_SROA_3:%.*]] = alloca [99 x i8], align 1
; CHECK-NEXT: [[A_SROA_34:%.*]] = alloca [16 x i8], align 1
; CHECK-NEXT: [[A_SROA_15:%.*]] = alloca [42 x i8], align 1
; CHECK-NEXT: [[A_SROA_16:%.*]] = alloca [7 x i8], align 1
; CHECK-NEXT: [[A_SROA_239:%.*]] = alloca [7 x i8], align 1
; CHECK-NEXT: [[A_SROA_31:%.*]] = alloca [85 x i8], align 1
; CHECK-NEXT: [[A_SROA_0_0_B_SROA_IDX:%.*]] = getelementptr inbounds [42 x i8], [42 x i8]* [[A_SROA_0]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_0_0_B_SROA_IDX]], i8* align 8 [[SRC:%.*]], i32 42, i1 false), !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT: [[A_SROA_2_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 42
; CHECK-NEXT: [[A_SROA_2_0_COPYLOAD:%.*]] = load i8, i8* [[A_SROA_2_0_SRC_SROA_RAW_IDX]], align 2, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_3_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 43
; CHECK-NEXT: [[A_SROA_3_0_B_SROA_IDX:%.*]] = getelementptr inbounds [99 x i8], [99 x i8]* [[A_SROA_3]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_3_0_B_SROA_IDX]], i8* align 1 [[A_SROA_3_0_SRC_SROA_RAW_IDX]], i32 99, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_34_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 142
; CHECK-NEXT: [[A_SROA_34_0_B_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_34_0_B_SROA_IDX]], i8* align 2 [[A_SROA_34_0_SRC_SROA_RAW_IDX]], i32 16, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_15_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 158
; CHECK-NEXT: [[A_SROA_15_0_B_SROA_IDX:%.*]] = getelementptr inbounds [42 x i8], [42 x i8]* [[A_SROA_15]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_15_0_B_SROA_IDX]], i8* align 2 [[A_SROA_15_0_SRC_SROA_RAW_IDX]], i32 42, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_16_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 200
; CHECK-NEXT: [[A_SROA_16_0_B_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_16_0_B_SROA_IDX]], i8* align 8 [[A_SROA_16_0_SRC_SROA_RAW_IDX]], i32 7, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_23_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 207
; CHECK-NEXT: [[A_SROA_23_0_COPYLOAD:%.*]] = load i8, i8* [[A_SROA_23_0_SRC_SROA_RAW_IDX]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_239_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 208
; CHECK-NEXT: [[A_SROA_239_0_B_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_239_0_B_SROA_IDX]], i8* align 8 [[A_SROA_239_0_SRC_SROA_RAW_IDX]], i32 7, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_31_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 215
; CHECK-NEXT: [[A_SROA_31_0_B_SROA_IDX:%.*]] = getelementptr inbounds [85 x i8], [85 x i8]* [[A_SROA_31]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_31_0_B_SROA_IDX]], i8* align 1 [[A_SROA_31_0_SRC_SROA_RAW_IDX]], i32 85, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_34_0_OVERLAP_1_I8_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 0
; CHECK-NEXT: store i8 1, i8* [[A_SROA_34_0_OVERLAP_1_I8_SROA_IDX]], align 1, !tbaa [[TBAA3:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_0_OVERLAP_1_I16_SROA_CAST:%.*]] = bitcast [16 x i8]* [[A_SROA_34]] to i16*
; CHECK-NEXT: store i16 1, i16* [[A_SROA_34_0_OVERLAP_1_I16_SROA_CAST]], align 1, !tbaa [[TBAA5:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_0_OVERLAP_1_I32_SROA_CAST:%.*]] = bitcast [16 x i8]* [[A_SROA_34]] to i32*
; CHECK-NEXT: store i32 1, i32* [[A_SROA_34_0_OVERLAP_1_I32_SROA_CAST]], align 1, !tbaa [[TBAA7:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_0_OVERLAP_1_I64_SROA_CAST:%.*]] = bitcast [16 x i8]* [[A_SROA_34]] to i64*
; CHECK-NEXT: store i64 1, i64* [[A_SROA_34_0_OVERLAP_1_I64_SROA_CAST]], align 1, !tbaa [[TBAA9:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_1_OVERLAP_2_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 1
; CHECK-NEXT: [[A_SROA_34_1_OVERLAP_2_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_1_OVERLAP_2_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 2, i64* [[A_SROA_34_1_OVERLAP_2_I64_SROA_CAST]], align 1, !tbaa [[TBAA11:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_2_OVERLAP_3_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 2
; CHECK-NEXT: [[A_SROA_34_2_OVERLAP_3_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_2_OVERLAP_3_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 3, i64* [[A_SROA_34_2_OVERLAP_3_I64_SROA_CAST]], align 1, !tbaa [[TBAA13:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_3_OVERLAP_4_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 3
; CHECK-NEXT: [[A_SROA_34_3_OVERLAP_4_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_3_OVERLAP_4_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 4, i64* [[A_SROA_34_3_OVERLAP_4_I64_SROA_CAST]], align 1, !tbaa [[TBAA15:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_4_OVERLAP_5_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 4
; CHECK-NEXT: [[A_SROA_34_4_OVERLAP_5_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_4_OVERLAP_5_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 5, i64* [[A_SROA_34_4_OVERLAP_5_I64_SROA_CAST]], align 1, !tbaa [[TBAA17:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_5_OVERLAP_6_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 5
; CHECK-NEXT: [[A_SROA_34_5_OVERLAP_6_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_5_OVERLAP_6_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 6, i64* [[A_SROA_34_5_OVERLAP_6_I64_SROA_CAST]], align 1, !tbaa [[TBAA19:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_6_OVERLAP_7_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 6
; CHECK-NEXT: [[A_SROA_34_6_OVERLAP_7_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_6_OVERLAP_7_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 7, i64* [[A_SROA_34_6_OVERLAP_7_I64_SROA_CAST]], align 1, !tbaa [[TBAA21:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_7_OVERLAP_8_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 7
; CHECK-NEXT: [[A_SROA_34_7_OVERLAP_8_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_7_OVERLAP_8_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 8, i64* [[A_SROA_34_7_OVERLAP_8_I64_SROA_CAST]], align 1, !tbaa [[TBAA23:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_8_OVERLAP_9_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 8
; CHECK-NEXT: [[A_SROA_34_8_OVERLAP_9_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_8_OVERLAP_9_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 9, i64* [[A_SROA_34_8_OVERLAP_9_I64_SROA_CAST]], align 1, !tbaa [[TBAA25:![0-9]+]]
; CHECK-NEXT: [[A_SROA_16_0_OVERLAP2_1_0_I8_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 0
; CHECK-NEXT: store i8 1, i8* [[A_SROA_16_0_OVERLAP2_1_0_I8_SROA_IDX]], align 1, !tbaa [[TBAA27:![0-9]+]]
; CHECK-NEXT: [[A_SROA_16_0_OVERLAP2_1_0_I16_SROA_CAST:%.*]] = bitcast [7 x i8]* [[A_SROA_16]] to i16*
; CHECK-NEXT: store i16 1, i16* [[A_SROA_16_0_OVERLAP2_1_0_I16_SROA_CAST]], align 1, !tbaa [[TBAA29:![0-9]+]]
; CHECK-NEXT: [[A_SROA_16_0_OVERLAP2_1_0_I32_SROA_CAST:%.*]] = bitcast [7 x i8]* [[A_SROA_16]] to i32*
; CHECK-NEXT: store i32 1, i32* [[A_SROA_16_0_OVERLAP2_1_0_I32_SROA_CAST]], align 1, !tbaa [[TBAA31:![0-9]+]]
; CHECK-NEXT: [[A_SROA_16_1_OVERLAP2_1_1_I32_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 1
; CHECK-NEXT: [[A_SROA_16_1_OVERLAP2_1_1_I32_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_16_1_OVERLAP2_1_1_I32_SROA_IDX]] to i32*
; CHECK-NEXT: store i32 2, i32* [[A_SROA_16_1_OVERLAP2_1_1_I32_SROA_CAST]], align 1, !tbaa [[TBAA33:![0-9]+]]
; CHECK-NEXT: [[A_SROA_16_2_OVERLAP2_1_2_I32_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 2
; CHECK-NEXT: [[A_SROA_16_2_OVERLAP2_1_2_I32_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_16_2_OVERLAP2_1_2_I32_SROA_IDX]] to i32*
; CHECK-NEXT: store i32 3, i32* [[A_SROA_16_2_OVERLAP2_1_2_I32_SROA_CAST]], align 1, !tbaa [[TBAA35:![0-9]+]]
; CHECK-NEXT: [[A_SROA_16_3_OVERLAP2_1_3_I32_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 3
; CHECK-NEXT: [[A_SROA_16_3_OVERLAP2_1_3_I32_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_16_3_OVERLAP2_1_3_I32_SROA_IDX]] to i32*
; CHECK-NEXT: store i32 4, i32* [[A_SROA_16_3_OVERLAP2_1_3_I32_SROA_CAST]], align 1, !tbaa [[TBAA37:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_0_OVERLAP2_2_0_I32_SROA_CAST:%.*]] = bitcast [7 x i8]* [[A_SROA_239]] to i32*
; CHECK-NEXT: store i32 1, i32* [[A_SROA_239_0_OVERLAP2_2_0_I32_SROA_CAST]], align 1, !tbaa [[TBAA39:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_1_OVERLAP2_2_1_I8_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 1
; CHECK-NEXT: store i8 1, i8* [[A_SROA_239_1_OVERLAP2_2_1_I8_SROA_IDX]], align 1, !tbaa [[TBAA41:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_1_OVERLAP2_2_1_I16_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 1
; CHECK-NEXT: [[A_SROA_239_1_OVERLAP2_2_1_I16_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_239_1_OVERLAP2_2_1_I16_SROA_IDX]] to i16*
; CHECK-NEXT: store i16 1, i16* [[A_SROA_239_1_OVERLAP2_2_1_I16_SROA_CAST]], align 1, !tbaa [[TBAA43:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_1_OVERLAP2_2_1_I32_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 1
; CHECK-NEXT: [[A_SROA_239_1_OVERLAP2_2_1_I32_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_239_1_OVERLAP2_2_1_I32_SROA_IDX]] to i32*
; CHECK-NEXT: store i32 1, i32* [[A_SROA_239_1_OVERLAP2_2_1_I32_SROA_CAST]], align 1, !tbaa [[TBAA45:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_2_OVERLAP2_2_2_I32_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 2
; CHECK-NEXT: [[A_SROA_239_2_OVERLAP2_2_2_I32_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_239_2_OVERLAP2_2_2_I32_SROA_IDX]] to i32*
; CHECK-NEXT: store i32 3, i32* [[A_SROA_239_2_OVERLAP2_2_2_I32_SROA_CAST]], align 1, !tbaa [[TBAA47:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_3_OVERLAP2_2_3_I32_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 3
; CHECK-NEXT: [[A_SROA_239_3_OVERLAP2_2_3_I32_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_239_3_OVERLAP2_2_3_I32_SROA_IDX]] to i32*
; CHECK-NEXT: store i32 4, i32* [[A_SROA_239_3_OVERLAP2_2_3_I32_SROA_CAST]], align 1, !tbaa [[TBAA49:![0-9]+]]
; CHECK-NEXT: [[A_SROA_15_197_OVERLAP2_PREFIX_SROA_IDX:%.*]] = getelementptr inbounds [42 x i8], [42 x i8]* [[A_SROA_15]], i64 0, i64 39
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_15_197_OVERLAP2_PREFIX_SROA_IDX]], i8* align 1 [[SRC]], i32 3, i1 false), !tbaa [[TBAA51:![0-9]+]]
; CHECK-NEXT: [[A_SROA_16_197_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 3
; CHECK-NEXT: [[A_SROA_16_197_OVERLAP2_PREFIX_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_16_197_OVERLAP2_PREFIX_SROA_IDX]], i8* align 1 [[A_SROA_16_197_SRC_SROA_RAW_IDX]], i32 5, i1 false), !tbaa [[TBAA51]]
; CHECK-NEXT: [[A_SROA_16_2_OVERLAP2_1_2_I8_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 2
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 [[A_SROA_16_2_OVERLAP2_1_2_I8_SROA_IDX]], i8 42, i32 5, i1 false), !tbaa [[TBAA53:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_0_OVERLAP2_1_2_I8_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 [[A_SROA_239_0_OVERLAP2_1_2_I8_SROA_IDX]], i8 42, i32 2, i1 false), !tbaa [[TBAA53]]
; CHECK-NEXT: [[A_SROA_239_209_OVERLAP2_2_1_I8_SROA_IDX11:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 1
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_239_209_OVERLAP2_2_1_I8_SROA_IDX11]], i8* align 1 [[SRC]], i32 5, i1 false), !tbaa [[TBAA55:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_210_OVERLAP2_2_2_I8_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 2
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_239_210_OVERLAP2_2_2_I8_SROA_IDX]], i8* align 1 [[SRC]], i32 5, i1 false), !tbaa [[TBAA57:![0-9]+]]
; CHECK-NEXT: [[A_SROA_31_210_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 5
; CHECK-NEXT: [[A_SROA_31_210_OVERLAP2_2_2_I8_SROA_IDX:%.*]] = getelementptr inbounds [85 x i8], [85 x i8]* [[A_SROA_31]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_31_210_OVERLAP2_2_2_I8_SROA_IDX]], i8* align 1 [[A_SROA_31_210_SRC_SROA_RAW_IDX]], i32 3, i1 false), !tbaa [[TBAA57]]
; CHECK-NEXT: [[A_SROA_0_0_B_SROA_IDX1:%.*]] = getelementptr inbounds [42 x i8], [42 x i8]* [[A_SROA_0]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[DST:%.*]], i8* align 1 [[A_SROA_0_0_B_SROA_IDX1]], i32 42, i1 false), !tbaa [[TBAA59:![0-9]+]]
; CHECK-NEXT: [[A_SROA_2_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 42
; CHECK-NEXT: store i8 0, i8* [[A_SROA_2_0_DST_SROA_RAW_IDX]], align 1, !tbaa [[TBAA59]]
; CHECK-NEXT: [[A_SROA_3_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 43
; CHECK-NEXT: [[A_SROA_3_0_B_SROA_IDX3:%.*]] = getelementptr inbounds [99 x i8], [99 x i8]* [[A_SROA_3]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_3_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_3_0_B_SROA_IDX3]], i32 99, i1 false), !tbaa [[TBAA59]]
; CHECK-NEXT: [[A_SROA_34_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 142
; CHECK-NEXT: [[A_SROA_34_0_B_SROA_IDX5:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_34_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_34_0_B_SROA_IDX5]], i32 16, i1 false), !tbaa [[TBAA59]]
; CHECK-NEXT: [[A_SROA_15_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 158
; CHECK-NEXT: [[A_SROA_15_0_B_SROA_IDX6:%.*]] = getelementptr inbounds [42 x i8], [42 x i8]* [[A_SROA_15]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_15_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_15_0_B_SROA_IDX6]], i32 42, i1 false), !tbaa [[TBAA59]]
; CHECK-NEXT: [[A_SROA_16_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 200
; CHECK-NEXT: [[A_SROA_16_0_B_SROA_IDX7:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_16_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_16_0_B_SROA_IDX7]], i32 7, i1 false), !tbaa [[TBAA59]]
; CHECK-NEXT: [[A_SROA_23_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 207
; CHECK-NEXT: store i8 42, i8* [[A_SROA_23_0_DST_SROA_RAW_IDX]], align 1, !tbaa [[TBAA59]]
; CHECK-NEXT: [[A_SROA_239_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 208
; CHECK-NEXT: [[A_SROA_239_0_B_SROA_IDX10:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_239_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_239_0_B_SROA_IDX10]], i32 7, i1 false), !tbaa [[TBAA59]]
; CHECK-NEXT: [[A_SROA_31_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 215
; CHECK-NEXT: [[A_SROA_31_0_B_SROA_IDX12:%.*]] = getelementptr inbounds [85 x i8], [85 x i8]* [[A_SROA_31]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_31_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_31_0_B_SROA_IDX12]], i32 85, i1 false), !tbaa [[TBAA59]]
; CHECK-NEXT: ret void
%a = alloca [300 x i8]
%b = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b, i8* align 8 %src, i32 300, i1 false), !tbaa !0
; Clobber a single element of the array; this should be promotable and be deleted.
%c = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 42
; Make a sequence of overlapping stores to the array. These overlap both in
; forward strides and in shrinking accesses.
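; As a rough picture of what follows: the i64 stores below begin at byte
; offsets 142 through 150 of %a, each one byte further along than the last,
; so each 8-byte store overlaps its predecessor in 7 bytes:
;
;   offset 142: |------- i64 1 -------|
;   offset 143:   |------- i64 2 -------|
;   ...
;   offset 150:                 |------- i64 9 -------|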
%overlap.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 142
%overlap.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 143
%overlap.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 144
%overlap.4.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 145
%overlap.5.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 146
%overlap.6.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 147
%overlap.7.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 148
%overlap.8.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 149
%overlap.9.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 150
%overlap.1.i16 = bitcast i8* %overlap.1.i8 to i16*
%overlap.1.i32 = bitcast i8* %overlap.1.i8 to i32*
%overlap.1.i64 = bitcast i8* %overlap.1.i8 to i64*
%overlap.2.i64 = bitcast i8* %overlap.2.i8 to i64*
%overlap.3.i64 = bitcast i8* %overlap.3.i8 to i64*
%overlap.4.i64 = bitcast i8* %overlap.4.i8 to i64*
%overlap.5.i64 = bitcast i8* %overlap.5.i8 to i64*
%overlap.6.i64 = bitcast i8* %overlap.6.i8 to i64*
%overlap.7.i64 = bitcast i8* %overlap.7.i8 to i64*
%overlap.8.i64 = bitcast i8* %overlap.8.i8 to i64*
%overlap.9.i64 = bitcast i8* %overlap.9.i8 to i64*
store i8 1, i8* %overlap.1.i8, !tbaa !3
store i16 1, i16* %overlap.1.i16, !tbaa !5
store i32 1, i32* %overlap.1.i32, !tbaa !7
store i64 1, i64* %overlap.1.i64, !tbaa !9
store i64 2, i64* %overlap.2.i64, !tbaa !11
store i64 3, i64* %overlap.3.i64, !tbaa !13
store i64 4, i64* %overlap.4.i64, !tbaa !15
store i64 5, i64* %overlap.5.i64, !tbaa !17
store i64 6, i64* %overlap.6.i64, !tbaa !19
store i64 7, i64* %overlap.7.i64, !tbaa !21
store i64 8, i64* %overlap.8.i64, !tbaa !23
store i64 9, i64* %overlap.9.i64, !tbaa !25
; Make two sequences of overlapping stores with more gaps and irregularities.
%overlap2.1.0.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 200
%overlap2.1.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 201
%overlap2.1.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 202
%overlap2.1.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 203
%overlap2.2.0.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 208
%overlap2.2.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 209
%overlap2.2.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 210
%overlap2.2.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 211
%overlap2.1.0.i16 = bitcast i8* %overlap2.1.0.i8 to i16*
%overlap2.1.0.i32 = bitcast i8* %overlap2.1.0.i8 to i32*
%overlap2.1.1.i32 = bitcast i8* %overlap2.1.1.i8 to i32*
%overlap2.1.2.i32 = bitcast i8* %overlap2.1.2.i8 to i32*
%overlap2.1.3.i32 = bitcast i8* %overlap2.1.3.i8 to i32*
store i8 1, i8* %overlap2.1.0.i8, !tbaa !27
store i16 1, i16* %overlap2.1.0.i16, !tbaa !29
store i32 1, i32* %overlap2.1.0.i32, !tbaa !31
store i32 2, i32* %overlap2.1.1.i32, !tbaa !33
store i32 3, i32* %overlap2.1.2.i32, !tbaa !35
store i32 4, i32* %overlap2.1.3.i32, !tbaa !37
%overlap2.2.0.i32 = bitcast i8* %overlap2.2.0.i8 to i32*
%overlap2.2.1.i16 = bitcast i8* %overlap2.2.1.i8 to i16*
%overlap2.2.1.i32 = bitcast i8* %overlap2.2.1.i8 to i32*
%overlap2.2.2.i32 = bitcast i8* %overlap2.2.2.i8 to i32*
%overlap2.2.3.i32 = bitcast i8* %overlap2.2.3.i8 to i32*
store i32 1, i32* %overlap2.2.0.i32, !tbaa !39
store i8 1, i8* %overlap2.2.1.i8, !tbaa !41
store i16 1, i16* %overlap2.2.1.i16, !tbaa !43
store i32 1, i32* %overlap2.2.1.i32, !tbaa !45
store i32 3, i32* %overlap2.2.2.i32, !tbaa !47
store i32 4, i32* %overlap2.2.3.i32, !tbaa !49
%overlap2.prefix = getelementptr i8, i8* %overlap2.1.1.i8, i64 -4
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.prefix, i8* %src, i32 8, i1 false), !tbaa !51
; Bridge between the overlapping areas
call void @llvm.memset.p0i8.i32(i8* %overlap2.1.2.i8, i8 42, i32 8, i1 false), !tbaa !53
; ...promoted i8 store...
; Entirely within the second overlap.
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.2.1.i8, i8* %src, i32 5, i1 false), !tbaa !55
; Trailing past the second overlap.
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.2.2.i8, i8* %src, i32 8, i1 false), !tbaa !57
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %b, i32 300, i1 false), !tbaa !59
define void @test4(i8* %dst, i8* %src) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [20 x i8], align 1
; CHECK-NEXT: [[A_SROA_2_SROA_4:%.*]] = alloca [7 x i8], align 1
; CHECK-NEXT: [[A_SROA_3:%.*]] = alloca [10 x i8], align 1
; CHECK-NEXT: [[A_SROA_34_SROA_5:%.*]] = alloca [7 x i8], align 1
; CHECK-NEXT: [[A_SROA_6_SROA_4:%.*]] = alloca [7 x i8], align 1
; CHECK-NEXT: [[A_SROA_7:%.*]] = alloca [40 x i8], align 1
; CHECK-NEXT: [[A_SROA_0_0_B_SROA_IDX:%.*]] = getelementptr inbounds [20 x i8], [20 x i8]* [[A_SROA_0]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_0_0_B_SROA_IDX]], i8* align 1 [[SRC:%.*]], i32 20, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_2_SROA_0_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 20
; CHECK-NEXT: [[A_SROA_2_SROA_0_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_2_SROA_0_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_IDX]] to i16*
; CHECK-NEXT: [[A_SROA_2_SROA_0_0_COPYLOAD:%.*]] = load i16, i16* [[A_SROA_2_SROA_0_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_CAST]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_2_SROA_3_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 22
; CHECK-NEXT: [[A_SROA_2_SROA_3_0_COPYLOAD:%.*]] = load i8, i8* [[A_SROA_2_SROA_3_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_2_SROA_4_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 23
; CHECK-NEXT: [[A_SROA_2_SROA_4_0_B_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_2_SROA_4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_2_SROA_4_0_B_SROA_IDX]], i8* align 1 [[A_SROA_2_SROA_4_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX]], i32 7, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_3_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 30
; CHECK-NEXT: [[A_SROA_3_0_B_SROA_IDX:%.*]] = getelementptr inbounds [10 x i8], [10 x i8]* [[A_SROA_3]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_3_0_B_SROA_IDX]], i8* align 1 [[A_SROA_3_0_SRC_SROA_RAW_IDX]], i32 10, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_34_SROA_0_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 40
; CHECK-NEXT: [[A_SROA_34_SROA_0_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_SROA_0_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_IDX]] to i16*
; CHECK-NEXT: [[A_SROA_34_SROA_0_0_COPYLOAD:%.*]] = load i16, i16* [[A_SROA_34_SROA_0_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_CAST]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_34_SROA_4_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 42
; CHECK-NEXT: [[A_SROA_34_SROA_4_0_COPYLOAD:%.*]] = load i8, i8* [[A_SROA_34_SROA_4_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_34_SROA_5_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 43
; CHECK-NEXT: [[A_SROA_34_SROA_5_0_B_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_34_SROA_5]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_34_SROA_5_0_B_SROA_IDX]], i8* align 1 [[A_SROA_34_SROA_5_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX]], i32 7, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_6_SROA_0_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 50
; CHECK-NEXT: [[A_SROA_6_SROA_0_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_6_SROA_0_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_IDX]] to i16*
; CHECK-NEXT: [[A_SROA_6_SROA_0_0_COPYLOAD:%.*]] = load i16, i16* [[A_SROA_6_SROA_0_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_CAST]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_6_SROA_3_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 52
; CHECK-NEXT: [[A_SROA_6_SROA_3_0_COPYLOAD:%.*]] = load i8, i8* [[A_SROA_6_SROA_3_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_6_SROA_4_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 53
; CHECK-NEXT: [[A_SROA_6_SROA_4_0_B_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_6_SROA_4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_6_SROA_4_0_B_SROA_IDX]], i8* align 1 [[A_SROA_6_SROA_4_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX]], i32 7, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_7_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 60
; CHECK-NEXT: [[A_SROA_7_0_B_SROA_IDX:%.*]] = getelementptr inbounds [40 x i8], [40 x i8]* [[A_SROA_7]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_7_0_B_SROA_IDX]], i8* align 1 [[A_SROA_7_0_SRC_SROA_RAW_IDX]], i32 40, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_2_SROA_4_3_A_SROA_34_SROA_5_0_A_DST_1_SROA_IDX_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_34_SROA_5]], i64 0, i64 0
; CHECK-NEXT: [[A_SROA_2_SROA_4_3_A_SRC_1_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_2_SROA_4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_2_SROA_4_3_A_SROA_34_SROA_5_0_A_DST_1_SROA_IDX_SROA_IDX]], i8* align 1 [[A_SROA_2_SROA_4_3_A_SRC_1_SROA_IDX]], i32 7, i1 false), !tbaa [[TBAA3]]
; CHECK-NEXT: [[A_SROA_6_SROA_4_3_A_SROA_34_SROA_5_0_A_DST_1_SROA_IDX16_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_34_SROA_5]], i64 0, i64 0
; CHECK-NEXT: [[A_SROA_6_SROA_4_3_A_SRC_2_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_6_SROA_4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_6_SROA_4_3_A_SROA_34_SROA_5_0_A_DST_1_SROA_IDX16_SROA_IDX]], i8* align 1 [[A_SROA_6_SROA_4_3_A_SRC_2_SROA_IDX]], i32 7, i1 false), !tbaa [[TBAA5]]
; CHECK-NEXT: [[A_SROA_0_0_B_SROA_IDX1:%.*]] = getelementptr inbounds [20 x i8], [20 x i8]* [[A_SROA_0]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[DST:%.*]], i8* align 1 [[A_SROA_0_0_B_SROA_IDX1]], i32 20, i1 false), !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_2_SROA_0_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 20
; CHECK-NEXT: [[A_SROA_2_SROA_0_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_2_SROA_0_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_IDX]] to i16*
; CHECK-NEXT: store i16 [[A_SROA_2_SROA_0_0_COPYLOAD]], i16* [[A_SROA_2_SROA_0_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_CAST]], align 1, !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_2_SROA_3_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 22
; CHECK-NEXT: store i8 [[A_SROA_2_SROA_3_0_COPYLOAD]], i8* [[A_SROA_2_SROA_3_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_RAW_IDX]], align 1, !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_2_SROA_4_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 23
; CHECK-NEXT: [[A_SROA_2_SROA_4_0_B_SROA_IDX22:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_2_SROA_4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_2_SROA_4_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_RAW_IDX]], i8* align 1 [[A_SROA_2_SROA_4_0_B_SROA_IDX22]], i32 7, i1 false), !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_3_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 30
; CHECK-NEXT: [[A_SROA_3_0_B_SROA_IDX3:%.*]] = getelementptr inbounds [10 x i8], [10 x i8]* [[A_SROA_3]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_3_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_3_0_B_SROA_IDX3]], i32 10, i1 false), !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_34_SROA_0_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 40
; CHECK-NEXT: [[A_SROA_34_SROA_0_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_SROA_0_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_IDX]] to i16*
; CHECK-NEXT: store i16 [[A_SROA_6_SROA_0_0_COPYLOAD]], i16* [[A_SROA_34_SROA_0_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_CAST]], align 1, !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_34_SROA_4_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 42
; CHECK-NEXT: store i8 [[A_SROA_6_SROA_3_0_COPYLOAD]], i8* [[A_SROA_34_SROA_4_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_RAW_IDX]], align 1, !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_34_SROA_5_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 43
; CHECK-NEXT: [[A_SROA_34_SROA_5_0_B_SROA_IDX15:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_34_SROA_5]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_34_SROA_5_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_RAW_IDX]], i8* align 1 [[A_SROA_34_SROA_5_0_B_SROA_IDX15]], i32 7, i1 false), !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_6_SROA_0_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 50
; CHECK-NEXT: [[A_SROA_6_SROA_0_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_6_SROA_0_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_IDX]] to i16*
; CHECK-NEXT: store i16 [[A_SROA_6_SROA_0_0_COPYLOAD]], i16* [[A_SROA_6_SROA_0_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_CAST]], align 1, !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_6_SROA_3_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 52
; CHECK-NEXT: store i8 [[A_SROA_6_SROA_3_0_COPYLOAD]], i8* [[A_SROA_6_SROA_3_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_RAW_IDX]], align 1, !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_6_SROA_4_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 53
; CHECK-NEXT: [[A_SROA_6_SROA_4_0_B_SROA_IDX19:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_6_SROA_4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_6_SROA_4_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_RAW_IDX]], i8* align 1 [[A_SROA_6_SROA_4_0_B_SROA_IDX19]], i32 7, i1 false), !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_7_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 60
; CHECK-NEXT: [[A_SROA_7_0_B_SROA_IDX8:%.*]] = getelementptr inbounds [40 x i8], [40 x i8]* [[A_SROA_7]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_7_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_7_0_B_SROA_IDX8]], i32 40, i1 false), !tbaa [[TBAA7]]
; CHECK-NEXT: ret void
%a = alloca [100 x i8]
%b = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b, i8* %src, i32 100, i1 false), !tbaa !0
%a.src.1 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 20
%a.dst.1 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 40
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.dst.1, i8* %a.src.1, i32 10, i1 false), !tbaa !3
; Clobber a single element of the array; this should be promotable and be deleted.
%c = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 42
%a.src.2 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 50
call void @llvm.memmove.p0i8.p0i8.i32(i8* %a.dst.1, i8* %a.src.2, i32 10, i1 false), !tbaa !5
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %b, i32 100, i1 false), !tbaa !7
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
declare void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* nocapture, i8* nocapture, i32, i1) nounwind
declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
define i16 @test5() {
; CHECK-LABEL: @test5(
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float 0.000000e+00 to i32
; CHECK-NEXT: [[A_SROA_0_2_EXTRACT_SHIFT:%.*]] = lshr i32 [[TMP0]], 16
; CHECK-NEXT: [[A_SROA_0_2_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_0_2_EXTRACT_SHIFT]] to i16
; CHECK-NEXT: ret i16 [[A_SROA_0_2_EXTRACT_TRUNC]]
%fptr = bitcast [4 x i8]* %a to float*
store float 0.0, float* %fptr
%ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 2
%iptr = bitcast i8* %ptr to i16*
%val = load i16, i16* %iptr
define i16 @test5_multi_addrspace_access() {
; CHECK-LABEL: @test5_multi_addrspace_access(
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float 0.000000e+00 to i32
; CHECK-NEXT: [[A_SROA_0_2_EXTRACT_SHIFT:%.*]] = lshr i32 [[TMP0]], 16
; CHECK-NEXT: [[A_SROA_0_2_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_0_2_EXTRACT_SHIFT]] to i16
; CHECK-NEXT: ret i16 [[A_SROA_0_2_EXTRACT_TRUNC]]
%fptr = bitcast [4 x i8]* %a to float*
%fptr.as1 = addrspacecast float* %fptr to float addrspace(1)*
store float 0.0, float addrspace(1)* %fptr.as1
%ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 2
%iptr = bitcast i8* %ptr to i16*
%val = load i16, i16* %iptr
define i32 @test6() {
; CHECK-LABEL: @test6(
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca i32, align 4
; CHECK-NEXT: store volatile i32 707406378, i32* [[A_SROA_0]], align 4
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_VAL:%.*]] = load i32, i32* [[A_SROA_0]], align 4
; CHECK-NEXT: ret i32 [[A_SROA_0_0_A_SROA_0_0_VAL]]
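; A note on the constant above: the volatile memset below writes the byte 42
; (0x2A) over all four bytes of the alloca, so the rewritten volatile store
; uses the equivalent i32 value 0x2A2A2A2A = 707406378.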
%ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %ptr, i8 42, i32 4, i1 true)
%iptr = bitcast i8* %ptr to i32*
%val = load i32, i32* %iptr
define void @test7(i8* %src, i8* %dst) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A_SROA_0_0_SRC_SROA_CAST:%.*]] = bitcast i8* [[SRC:%.*]] to i32*
; CHECK-NEXT: [[A_SROA_0_0_COPYLOAD:%.*]] = load volatile i32, i32* [[A_SROA_0_0_SRC_SROA_CAST]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: store volatile i32 [[A_SROA_0_0_COPYLOAD]], i32* [[A_SROA_0]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_0_0_DST_SROA_CAST:%.*]] = bitcast i8* [[DST:%.*]] to i32*
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_COPYLOAD1:%.*]] = load volatile i32, i32* [[A_SROA_0]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: store volatile i32 [[A_SROA_0_0_A_SROA_0_0_COPYLOAD1]], i32* [[A_SROA_0_0_DST_SROA_CAST]], align 1, !tbaa [[TBAA3]]
; CHECK-NEXT: ret void
%ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i1 true), !tbaa !0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i1 true), !tbaa !3
%S1 = type { i32, i32, [16 x i8] }
%S2 = type { %S1*, %S2* }
define %S2 @test8(%S2* %arg) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: [[S2_NEXT_PTR:%.*]] = getelementptr [[S2:%.*]], %S2* [[ARG:%.*]], i64 0, i32 1
; CHECK-NEXT: [[S2_NEXT:%.*]] = load %S2*, %S2** [[S2_NEXT_PTR]], align 8, !tbaa [[TBAA0]]
; CHECK-NEXT: [[S2_NEXT_S1_PTR:%.*]] = getelementptr [[S2]], %S2* [[S2_NEXT]], i64 0, i32 0
; CHECK-NEXT: [[S2_NEXT_S1:%.*]] = load %S1*, %S1** [[S2_NEXT_S1_PTR]], align 8, !tbaa [[TBAA3]]
; CHECK-NEXT: [[S2_NEXT_NEXT_PTR:%.*]] = getelementptr [[S2]], %S2* [[S2_NEXT]], i64 0, i32 1
; CHECK-NEXT: [[S2_NEXT_NEXT:%.*]] = load %S2*, %S2** [[S2_NEXT_NEXT_PTR]], align 8, !tbaa [[TBAA7]]
; CHECK-NEXT: [[RESULT1:%.*]] = insertvalue [[S2]] undef, %S1* [[S2_NEXT_S1]], 0
; CHECK-NEXT: [[RESULT2:%.*]] = insertvalue [[S2]] [[RESULT1]], %S2* [[S2_NEXT_NEXT]], 1
; CHECK-NEXT: ret [[S2]] [[RESULT2]]
%s2.next.ptr = getelementptr %S2, %S2* %arg, i64 0, i32 1
%s2.next = load %S2*, %S2** %s2.next.ptr, !tbaa !0
%s2.next.s1.ptr = getelementptr %S2, %S2* %s2.next, i64 0, i32 0
%s2.next.s1 = load %S1*, %S1** %s2.next.s1.ptr, !tbaa !3
%new.s1.ptr = getelementptr %S2, %S2* %new, i64 0, i32 0
store %S1* %s2.next.s1, %S1** %new.s1.ptr, !tbaa !5
%s2.next.next.ptr = getelementptr %S2, %S2* %s2.next, i64 0, i32 1
%s2.next.next = load %S2*, %S2** %s2.next.next.ptr, !tbaa !7
%new.next.ptr = getelementptr %S2, %S2* %new, i64 0, i32 1
store %S2* %s2.next.next, %S2** %new.next.ptr, !tbaa !9
%new.s1 = load %S1*, %S1** %new.s1.ptr
%result1 = insertvalue %S2 undef, %S1* %new.s1, 0
%new.next = load %S2*, %S2** %new.next.ptr
%result2 = insertvalue %S2 %result1, %S2* %new.next, 1
define i64 @test9() {
; Ensure we can handle loads off the end of an alloca even when wrapped in
; weird bit casts and types. This is valid IR due to the alignment and masking
; off the bits past the end of the alloca.
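; Concretely (a sketch of the arithmetic): the { [3 x i8] } alloca occupies 3
; bytes but is 8-byte aligned, so the i64 load below reads 5 bytes past its
; end. The trailing 'and' with 16777215 (0xFFFFFF) keeps only the low 3
; in-bounds bytes, which is what makes the wide load tolerable here.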
; CHECK-LABEL: @test9(
; CHECK-NEXT: [[A_SROA_3_0_INSERT_EXT:%.*]] = zext i8 26 to i64
; CHECK-NEXT: [[A_SROA_3_0_INSERT_SHIFT:%.*]] = shl i64 [[A_SROA_3_0_INSERT_EXT]], 16
; CHECK-NEXT: [[A_SROA_3_0_INSERT_MASK:%.*]] = and i64 undef, -16711681
; CHECK-NEXT: [[A_SROA_3_0_INSERT_INSERT:%.*]] = or i64 [[A_SROA_3_0_INSERT_MASK]], [[A_SROA_3_0_INSERT_SHIFT]]
; CHECK-NEXT: [[A_SROA_2_0_INSERT_EXT:%.*]] = zext i8 0 to i64
; CHECK-NEXT: [[A_SROA_2_0_INSERT_SHIFT:%.*]] = shl i64 [[A_SROA_2_0_INSERT_EXT]], 8
; CHECK-NEXT: [[A_SROA_2_0_INSERT_MASK:%.*]] = and i64 [[A_SROA_3_0_INSERT_INSERT]], -65281
; CHECK-NEXT: [[A_SROA_2_0_INSERT_INSERT:%.*]] = or i64 [[A_SROA_2_0_INSERT_MASK]], [[A_SROA_2_0_INSERT_SHIFT]]
; CHECK-NEXT: [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i8 0 to i64
; CHECK-NEXT: [[A_SROA_0_0_INSERT_MASK:%.*]] = and i64 [[A_SROA_2_0_INSERT_INSERT]], -256
; CHECK-NEXT: [[A_SROA_0_0_INSERT_INSERT:%.*]] = or i64 [[A_SROA_0_0_INSERT_MASK]], [[A_SROA_0_0_INSERT_EXT]]
; CHECK-NEXT: [[RESULT:%.*]] = and i64 [[A_SROA_0_0_INSERT_INSERT]], 16777215
; CHECK-NEXT: ret i64 [[RESULT]]
%a = alloca { [3 x i8] }, align 8
%gep1 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 0
store i8 0, i8* %gep1, align 1
%gep2 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 1
store i8 0, i8* %gep2, align 1
%gep3 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 2
store i8 26, i8* %gep3, align 1
%cast = bitcast { [3 x i8] }* %a to { i64 }*
%elt = getelementptr inbounds { i64 }, { i64 }* %cast, i32 0, i32 0
%load = load i64, i64* %elt
%result = and i64 %load, 16777215
define %S2* @test10() {
; CHECK-LABEL: @test10(
; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint %S2* null to i64
; CHECK-NEXT: ret %S2* null
%ptr = getelementptr [8 x i8], [8 x i8]* %a, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %ptr, i8 0, i32 8, i1 false)
%s2ptrptr = bitcast i8* %ptr to %S2**
%s2ptr = load %S2*, %S2** %s2ptrptr
define i32 @test11() {
; CHECK-LABEL: @test11(
; CHECK-NEXT: br i1 undef, label [[GOOD:%.*]], label [[BAD:%.*]]
; CHECK-NEXT: ret i32 0
; CHECK-NEXT: ret i32 undef
br i1 undef, label %good, label %bad
%Y = getelementptr i32, i32* %X, i64 0
%Z = load i32, i32* %Y
%Y2 = getelementptr i32, i32* %X, i64 1
store i32 0, i32* %Y2
%Z2 = load i32, i32* %Y2
define i8 @test12() {
; We fully promote these to the i24 load or store size, resulting in just masks
; and other operations that instcombine will fold, but no alloca.
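; A sketch of the insert arithmetic in the assertions below: each stored i8 is
; zero-extended to i24, shifted to its byte position (0, 8, or 16 bits), and
; merged via an and/or pair whose mask clears just the byte being inserted;
; e.g. -65281 is 0xFF00FF and -256 is 0xFFFF00 as 24-bit values.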
; CHECK-LABEL: @test12(
; CHECK-NEXT: [[A_SROA_3_0_INSERT_EXT:%.*]] = zext i8 0 to i24
; CHECK-NEXT: [[A_SROA_3_0_INSERT_SHIFT:%.*]] = shl i24 [[A_SROA_3_0_INSERT_EXT]], 16
; CHECK-NEXT: [[A_SROA_3_0_INSERT_MASK:%.*]] = and i24 undef, 65535
; CHECK-NEXT: [[A_SROA_3_0_INSERT_INSERT:%.*]] = or i24 [[A_SROA_3_0_INSERT_MASK]], [[A_SROA_3_0_INSERT_SHIFT]]
; CHECK-NEXT: [[A_SROA_2_0_INSERT_EXT:%.*]] = zext i8 0 to i24
; CHECK-NEXT: [[A_SROA_2_0_INSERT_SHIFT:%.*]] = shl i24 [[A_SROA_2_0_INSERT_EXT]], 8
; CHECK-NEXT: [[A_SROA_2_0_INSERT_MASK:%.*]] = and i24 [[A_SROA_3_0_INSERT_INSERT]], -65281
; CHECK-NEXT: [[A_SROA_2_0_INSERT_INSERT:%.*]] = or i24 [[A_SROA_2_0_INSERT_MASK]], [[A_SROA_2_0_INSERT_SHIFT]]
; CHECK-NEXT: [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i8 0 to i24
; CHECK-NEXT: [[A_SROA_0_0_INSERT_MASK:%.*]] = and i24 [[A_SROA_2_0_INSERT_INSERT]], -256
; CHECK-NEXT: [[A_SROA_0_0_INSERT_INSERT:%.*]] = or i24 [[A_SROA_0_0_INSERT_MASK]], [[A_SROA_0_0_INSERT_EXT]]
; CHECK-NEXT: [[B_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i24 [[A_SROA_0_0_INSERT_INSERT]] to i8
; CHECK-NEXT: [[B_SROA_2_0_EXTRACT_SHIFT:%.*]] = lshr i24 [[A_SROA_0_0_INSERT_INSERT]], 8
; CHECK-NEXT: [[B_SROA_2_0_EXTRACT_TRUNC:%.*]] = trunc i24 [[B_SROA_2_0_EXTRACT_SHIFT]] to i8
; CHECK-NEXT: [[B_SROA_3_0_EXTRACT_SHIFT:%.*]] = lshr i24 [[A_SROA_0_0_INSERT_INSERT]], 16
; CHECK-NEXT: [[B_SROA_3_0_EXTRACT_TRUNC:%.*]] = trunc i24 [[B_SROA_3_0_EXTRACT_SHIFT]] to i8
; CHECK-NEXT: [[BSUM0:%.*]] = add i8 [[B_SROA_0_0_EXTRACT_TRUNC]], [[B_SROA_2_0_EXTRACT_TRUNC]]
; CHECK-NEXT: [[BSUM1:%.*]] = add i8 [[BSUM0]], [[B_SROA_3_0_EXTRACT_TRUNC]]
; CHECK-NEXT: ret i8 [[BSUM1]]
%a0ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 0
store i8 0, i8* %a0ptr
%a1ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 1
store i8 0, i8* %a1ptr
%a2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
store i8 0, i8* %a2ptr
%aiptr = bitcast [3 x i8]* %a to i24*
%ai = load i24, i24* %aiptr
%biptr = bitcast [3 x i8]* %b to i24*
store i24 %ai, i24* %biptr
%b0ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 0
%b0 = load i8, i8* %b0ptr
%b1ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 1
%b1 = load i8, i8* %b1ptr
%b2ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 2
%b2 = load i8, i8* %b2ptr
%bsum0 = add i8 %b0, %b1
%bsum1 = add i8 %bsum0, %b2
define i32 @test13() {
; Ensure we don't crash and handle undefined loads that straddle the end of the
; alloca.
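; As a rough picture: the [3 x i8] alloca is only 2-byte aligned, so the i16
; load at byte offset 2 below covers bytes 2 and 3, and byte 3 lies past the
; end of the alloca. Only the in-bounds byte (stored as 0) contributes, which
; is why the assertions show a 'zext i8 0 to i16'.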
; CHECK-LABEL: @test13(
; CHECK-NEXT: [[A_SROA_2_2_LOAD_EXT:%.*]] = zext i8 0 to i16
; CHECK-NEXT: [[RET:%.*]] = zext i16 [[A_SROA_2_2_LOAD_EXT]] to i32
; CHECK-NEXT: ret i32 [[RET]]
%a = alloca [3 x i8], align 2
%b0ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 0
store i8 0, i8* %b0ptr
%b1ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 1
store i8 0, i8* %b1ptr
%b2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
store i8 0, i8* %b2ptr
%iptrcast = bitcast [3 x i8]* %a to i16*
%iptrgep = getelementptr i16, i16* %iptrcast, i64 1
%i = load i16, i16* %iptrgep
%ret = zext i16 %i to i32
%test14.struct = type { [3 x i32] }
define void @test14(...) nounwind uwtable {
; This is a strange case where we split allocas into promotable partitions, but
; also gain enough data to prove they must be dead allocas due to GEPs that walk
; across two adjacent allocas. Test that we don't try to promote or otherwise
; do bad things to these dead allocas; they should just be removed.
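; A sketch of why this works out: %test14.struct is 12 bytes ([3 x i32]), so
; the GEP to byte offset 12 below points one past the end of %a, and every
; store in the body writes outside the alloca it is derived from.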
; CHECK-LABEL: @test14(
; CHECK-NEXT: ret void
%a = alloca %test14.struct
%p = alloca %test14.struct*
%0 = bitcast %test14.struct* %a to i8*
%1 = getelementptr i8, i8* %0, i64 12
%2 = bitcast i8* %1 to %test14.struct*
%3 = getelementptr inbounds %test14.struct, %test14.struct* %2, i32 0, i32 0
%4 = getelementptr inbounds %test14.struct, %test14.struct* %a, i32 0, i32 0
%5 = bitcast [3 x i32]* %3 to i32*
%6 = bitcast [3 x i32]* %4 to i32*
%7 = load i32, i32* %6, align 4
store i32 %7, i32* %5, align 4
%8 = getelementptr inbounds i32, i32* %5, i32 1
%9 = getelementptr inbounds i32, i32* %6, i32 1
%10 = load i32, i32* %9, align 4
store i32 %10, i32* %8, align 4
%11 = getelementptr inbounds i32, i32* %5, i32 2
%12 = getelementptr inbounds i32, i32* %6, i32 2
%13 = load i32, i32* %12, align 4
store i32 %13, i32* %11, align 4
define i32 @test15(i1 %flag) nounwind uwtable {
; Ensure that when there are dead instructions using an alloca that are not
; loads or stores we still delete them during partitioning and rewriting.
; Otherwise we'll try to promote them while they still have unpromotable uses.
; CHECK-LABEL: @test15(
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK-NEXT: br label [[LOOP]]
%dead3 = phi i8* [ %gep3, %loop ], [ null, %entry ]
store i64 1879048192, i64* %l0, align 8
%bc0 = bitcast i64* %l0 to i8*
%gep0 = getelementptr i8, i8* %bc0, i64 3
%dead0 = bitcast i8* %gep0 to i64*
store i64 1879048192, i64* %l1, align 8
%bc1 = bitcast i64* %l1 to i8*
%gep1 = getelementptr i8, i8* %bc1, i64 3
%dead1 = getelementptr i8, i8* %gep1, i64 1
store i64 1879048192, i64* %l2, align 8
%bc2 = bitcast i64* %l2 to i8*
%gep2.1 = getelementptr i8, i8* %bc2, i64 1
%gep2.2 = getelementptr i8, i8* %bc2, i64 3
; Note that this select should get visited multiple times due to using two
; different GEPs off the same alloca. We should only delete it once.
%dead2 = select i1 %flag, i8* %gep2.1, i8* %gep2.2
store i64 1879048192, i64* %l3, align 8
%bc3 = bitcast i64* %l3 to i8*
%gep3 = getelementptr i8, i8* %bc3, i64 3
define void @test16(i8* %src, i8* %dst) {
; Ensure that we can promote an alloca of [3 x i8] to an i24 SSA value.
; CHECK-LABEL: @test16(
; CHECK-NEXT: [[A_SROA_0_0_SRC_SROA_CAST:%.*]] = bitcast i8* [[SRC:%.*]] to i24*
; CHECK-NEXT: [[A_SROA_0_0_COPYLOAD:%.*]] = load i24, i24* [[A_SROA_0_0_SRC_SROA_CAST]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_0_0_DST_SROA_CAST:%.*]] = bitcast i8* [[DST:%.*]] to i24*
; CHECK-NEXT: store i24 0, i24* [[A_SROA_0_0_DST_SROA_CAST]], align 1, !tbaa [[TBAA5]]
; CHECK-NEXT: ret void
%ptr = getelementptr [3 x i8], [3 x i8]* %a, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i1 false), !tbaa !0
%cast = bitcast i8* %ptr to i24*
store i24 0, i24* %cast, !tbaa !3
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i1 false), !tbaa !5
define void @test17(i8* %src, i8* %dst) {
; Ensure that we can rewrite unpromotable memcpys which extend past the end of
; the alloca.
; CHECK-LABEL: @test17(
; CHECK-NEXT: [[A:%.*]] = alloca [3 x i8], align 1
; CHECK-NEXT: [[PTR:%.*]] = getelementptr [3 x i8], [3 x i8]* [[A]], i32 0, i32 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[PTR]], i8* [[SRC:%.*]], i32 4, i1 true), !tbaa [[TBAA0]]
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[DST:%.*]], i8* [[PTR]], i32 4, i1 true), !tbaa [[TBAA3]]
; CHECK-NEXT: ret void
%ptr = getelementptr [3 x i8], [3 x i8]* %a, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i1 true), !tbaa !0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i1 true), !tbaa !3
define void @test18(i8* %src, i8* %dst, i32 %size) {
; Preserve transfer intrinsics with a variable size, even if they overlap with
; fixed-size operations. Further, continue to split and promote allocas preceding
; the variable-sized intrinsic.
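; A rough sketch of the resulting split: the fixed 8-byte memcpy at offset 0
; is carved into two promoted i32 pieces, while the variable-sized transfers
; at offset 8 keep a backing alloca for the remaining 42 - 8 = 34 bytes (the
; [34 x i8] alloca in the assertions below).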
; CHECK-LABEL: @test18(
; CHECK-NEXT: [[A_SROA_33:%.*]] = alloca [34 x i8], align 1
; CHECK-NEXT: [[A_SROA_0_0_SRC_SROA_CAST:%.*]] = bitcast i8* [[SRC:%.*]] to i32*
; CHECK-NEXT: [[A_SROA_0_0_COPYLOAD:%.*]] = load i32, i32* [[A_SROA_0_0_SRC_SROA_CAST]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 4
; CHECK-NEXT: [[A_SROA_3_0_SRC_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_3_0_SRC_SROA_IDX]] to i32*
; CHECK-NEXT: [[A_SROA_3_0_COPYLOAD:%.*]] = load i32, i32* [[A_SROA_3_0_SRC_SROA_CAST]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_33_0_PTR2_SROA_IDX:%.*]] = getelementptr inbounds [34 x i8], [34 x i8]* [[A_SROA_33]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_33_0_PTR2_SROA_IDX]], i8* [[SRC]], i32 [[SIZE:%.*]], i1 false), !tbaa [[TBAA3]]
; CHECK-NEXT: [[A_SROA_33_0_PTR2_SROA_IDX6:%.*]] = getelementptr inbounds [34 x i8], [34 x i8]* [[A_SROA_33]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 [[A_SROA_33_0_PTR2_SROA_IDX6]], i8 42, i32 [[SIZE]], i1 false), !tbaa [[TBAA5]]
; CHECK-NEXT: [[A_SROA_0_0_DST_SROA_CAST:%.*]] = bitcast i8* [[DST:%.*]] to i32*
; CHECK-NEXT: store i32 42, i32* [[A_SROA_0_0_DST_SROA_CAST]], align 1, !tbaa [[TBAA9]]
; CHECK-NEXT: [[A_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 4
; CHECK-NEXT: [[A_SROA_3_0_DST_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_3_0_DST_SROA_IDX]] to i32*
; CHECK-NEXT: store i32 [[A_SROA_3_0_COPYLOAD]], i32* [[A_SROA_3_0_DST_SROA_CAST]], align 1, !tbaa [[TBAA9]]
; CHECK-NEXT: [[A_SROA_33_0_PTR2_SROA_IDX7:%.*]] = getelementptr inbounds [34 x i8], [34 x i8]* [[A_SROA_33]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[DST]], i8* align 1 [[A_SROA_33_0_PTR2_SROA_IDX7]], i32 [[SIZE]], i1 false), !tbaa [[TBAA11]]
; CHECK-NEXT: ret void
%a = alloca [42 x i8]
%ptr = getelementptr [42 x i8], [42 x i8]* %a, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 8, i1 false), !tbaa !0
%ptr2 = getelementptr [42 x i8], [42 x i8]* %a, i32 0, i32 8
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr2, i8* %src, i32 %size, i1 false), !tbaa !3
call void @llvm.memset.p0i8.i32(i8* %ptr2, i8 42, i32 %size, i1 false), !tbaa !5
%cast = bitcast i8* %ptr to i32*
store i32 42, i32* %cast, !tbaa !7
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 8, i1 false), !tbaa !9
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr2, i32 %size, i1 false), !tbaa !11
%opaque = type opaque
define i32 @test19(%opaque* %x) {
; This input will cause us to try to compute a natural GEP when rewriting
; pointers in such a way that we try to GEP through the opaque type. Previously,
; a check for an unsized type was missing and this crashed. Ensure it behaves
; properly.
; CHECK-LABEL: @test19(
; CHECK-NEXT: [[CAST1:%.*]] = bitcast %opaque* [[X:%.*]] to i8*
; CHECK-NEXT: [[A_SROA_0_0_CAST1_SROA_CAST:%.*]] = bitcast i8* [[CAST1]] to i64*
; CHECK-NEXT: [[A_SROA_0_0_COPYLOAD:%.*]] = load i64, i64* [[A_SROA_0_0_CAST1_SROA_CAST]], align 1
; CHECK-NEXT: [[A_SROA_2_0_CAST1_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[CAST1]], i64 8
; CHECK-NEXT: [[A_SROA_2_0_CAST1_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_2_0_CAST1_SROA_IDX]] to i8**
; CHECK-NEXT: [[A_SROA_2_0_COPYLOAD:%.*]] = load i8*, i8** [[A_SROA_2_0_CAST1_SROA_CAST]], align 1
; CHECK-NEXT: ret i32 undef
%a = alloca { i64, i8* }
%cast1 = bitcast %opaque* %x to i8*
%cast2 = bitcast { i64, i8* }* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast2, i8* %cast1, i32 16, i1 false)
%gep = getelementptr inbounds { i64, i8* }, { i64, i8* }* %a, i32 0, i32 0
%val = load i64, i64* %gep
declare void @llvm.memcpy.p0i8.p1i8.i32(i8* nocapture, i8 addrspace(1)* nocapture, i32, i32, i1) nounwind
define i32 @test19_addrspacecast(%opaque* %x) {
962 ; This input will cause us to try to compute a natural GEP when rewriting
963 ; pointers in such a way that we try to GEP through the opaque type. Previously,
964 ; a check for an unsized type was missing and this crashed. Ensure it behaves gracefully now.
966 ; CHECK-LABEL: @test19_addrspacecast(
968 ; CHECK-NEXT: [[CAST1:%.*]] = addrspacecast %opaque* [[X:%.*]] to i8 addrspace(1)*
969 ; CHECK-NEXT: [[A_SROA_0_0_CAST1_SROA_CAST:%.*]] = bitcast i8 addrspace(1)* [[CAST1]] to i64 addrspace(1)*
970 ; CHECK-NEXT: [[A_SROA_0_0_COPYLOAD:%.*]] = load i64, i64 addrspace(1)* [[A_SROA_0_0_CAST1_SROA_CAST]], align 1
971 ; CHECK-NEXT: [[A_SROA_2_0_CAST1_SROA_IDX:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[CAST1]], i16 8
972 ; CHECK-NEXT: [[A_SROA_2_0_CAST1_SROA_CAST:%.*]] = bitcast i8 addrspace(1)* [[A_SROA_2_0_CAST1_SROA_IDX]] to i8* addrspace(1)*
973 ; CHECK-NEXT: [[A_SROA_2_0_COPYLOAD:%.*]] = load i8*, i8* addrspace(1)* [[A_SROA_2_0_CAST1_SROA_CAST]], align 1
974 ; CHECK-NEXT: ret i32 undef
978 %a = alloca { i64, i8* }
979 %cast1 = addrspacecast %opaque* %x to i8 addrspace(1)*
980 %cast2 = bitcast { i64, i8* }* %a to i8*
981 call void @llvm.memcpy.p0i8.p1i8.i32(i8* %cast2, i8 addrspace(1)* %cast1, i32 16, i1 false)
982 %gep = getelementptr inbounds { i64, i8* }, { i64, i8* }* %a, i32 0, i32 0
983 %val = load i64, i64* %gep
987 define i32 @test20() {
988 ; Ensure we can track negative offsets (before the beginning of the alloca) and
989 ; negative relative offsets applied to base offsets that start past the end of the alloca.
990 ; CHECK-LABEL: @test20(
992 ; CHECK-NEXT: [[SUM1:%.*]] = add i32 1, 2
993 ; CHECK-NEXT: [[SUM2:%.*]] = add i32 [[SUM1]], 3
994 ; CHECK-NEXT: ret i32 [[SUM2]]
998 %a = alloca [3 x i32]
999 %gep1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 0
1000 store i32 1, i32* %gep1
1001 %gep2.1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 -2
1002 %gep2.2 = getelementptr i32, i32* %gep2.1, i32 3
1003 store i32 2, i32* %gep2.2
1004 %gep3.1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 14
1005 %gep3.2 = getelementptr i32, i32* %gep3.1, i32 -12
1006 store i32 3, i32* %gep3.2
1008 %load1 = load i32, i32* %gep1
1009 %load2 = load i32, i32* %gep2.2
1010 %load3 = load i32, i32* %gep3.2
1011 %sum1 = add i32 %load1, %load2
1012 %sum2 = add i32 %sum1, %load3
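; Worked offsets for reference (i32 elements are 4 bytes): %gep2.2 = %a + (-2 + 3)*4
; = &a[1] and %gep3.2 = %a + (14 - 12)*4 = &a[2], so the three stores land on
; elements 0, 1, and 2 and the promoted result is 1 + 2 + 3, matching the adds
; in the CHECK lines above.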
1016 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
1018 define i8 @test21() {
1019 ; Test allocations and offsets which border on overflow of the int64_t used
1020 ; internally. This is awkward to test, as LLVM doesn't cleanly support such
1021 ; extreme constructs.
1022 ; CHECK-LABEL: @test21(
1023 ; CHECK-NEXT: entry:
1024 ; CHECK-NEXT: [[RESULT:%.*]] = or i8 -1, -1
1025 ; CHECK-NEXT: ret i8 [[RESULT]]
1029 %a = alloca [2305843009213693951 x i8]
1030 %gep0 = getelementptr [2305843009213693951 x i8], [2305843009213693951 x i8]* %a, i64 0, i64 2305843009213693949
1031 store i8 255, i8* %gep0
1032 %gep1 = getelementptr [2305843009213693951 x i8], [2305843009213693951 x i8]* %a, i64 0, i64 -9223372036854775807
1033 %gep2 = getelementptr i8, i8* %gep1, i64 -1
1034 call void @llvm.memset.p0i8.i64(i8* %gep2, i8 0, i64 18446744073709551615, i1 false)
1035 %gep3 = getelementptr i8, i8* %gep1, i64 9223372036854775807
1036 %gep4 = getelementptr i8, i8* %gep3, i64 9223372036854775807
1037 %gep5 = getelementptr i8, i8* %gep4, i64 -6917529027641081857
1038 store i8 255, i8* %gep5
1039 %cast1 = bitcast i8* %gep4 to i32*
1040 store i32 0, i32* %cast1
1041 %load = load i8, i8* %gep0
1042 %gep6 = getelementptr i8, i8* %gep0, i32 1
1043 %load2 = load i8, i8* %gep6
1044 %result = or i8 %load, %load2
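; Worked offsets for reference: the array has 2^61 - 1 elements and %gep0 is
; element 2^61 - 3. %gep1 sits at byte offset -(2^63 - 1), %gep3 lands back at
; %a, %gep4 = %a + (2^63 - 1), and %gep5 = %gep4 - 6917529027641081857 =
; %a + 2^61 - 2, which is the last element and the same byte as %gep6. Both
; loaded bytes are therefore 255, giving the 'or i8 -1, -1' in the CHECK lines.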
1048 %PR13916.struct = type { i8 }
1050 define void @PR13916.1() {
1051 ; Ensure that we handle overlapping memcpy intrinsics correctly, especially in
1052 ; the case where there is a directly identical value for both source and dest.
1053 ; CHECK-LABEL: @PR13916.1(
1054 ; CHECK-NEXT: entry:
1055 ; CHECK-NEXT: ret void
1059 %a = alloca i8
1060 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %a, i32 1, i1 false)
1061 %tmp2 = load i8, i8* %a
1065 define void @PR13916.2() {
1066 ; Check whether we continue to handle them correctly when they start off with
1067 ; different pointer value chains, but during rewriting we coalesce them into the
1068 ; same value.
1069 ; CHECK-LABEL: @PR13916.2(
1070 ; CHECK-NEXT: entry:
1071 ; CHECK-NEXT: br i1 undef, label [[IF_THEN:%.*]], label [[IF_END:%.*]]
1073 ; CHECK-NEXT: br label [[IF_END]]
1075 ; CHECK-NEXT: ret void
1079 %a = alloca %PR13916.struct, align 1
1080 br i1 undef, label %if.then, label %if.end
1083 %tmp0 = bitcast %PR13916.struct* %a to i8*
1084 %tmp1 = bitcast %PR13916.struct* %a to i8*
1085 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp0, i8* %tmp1, i32 1, i1 false)
1089 %gep = getelementptr %PR13916.struct, %PR13916.struct* %a, i32 0, i32 0
1090 %tmp2 = load i8, i8* %gep
1094 define void @PR13990() {
1095 ; Ensure we can handle cases where processing one alloca causes the other
1096 ; alloca to become dead and get deleted. This might crash or fail under
1097 ; Valgrind if we regress.
1098 ; CHECK-LABEL: @PR13990(
1099 ; CHECK-NEXT: entry:
1100 ; CHECK-NEXT: br i1 undef, label [[BB1:%.*]], label [[BB2:%.*]]
1102 ; CHECK-NEXT: br i1 undef, label [[BB2]], label [[BB3:%.*]]
1104 ; CHECK-NEXT: br i1 undef, label [[BB3]], label [[BB4:%.*]]
1106 ; CHECK-NEXT: unreachable
1108 ; CHECK-NEXT: unreachable
1112 %tmp1 = alloca i8*
1113 %tmp2 = alloca i8*
1114 br i1 undef, label %bb1, label %bb2
1117 store i8* undef, i8** %tmp2
1118 br i1 undef, label %bb2, label %bb3
1121 %tmp50 = select i1 undef, i8** %tmp2, i8** %tmp1
1122 br i1 undef, label %bb3, label %bb4
1131 define double @PR13969(double %x) {
1132 ; Check that we detect when promotion will un-escape an alloca and iterate to
1133 ; re-try running SROA over that alloca. Without that, the two allocas that are
1134 ; stored into a dead alloca don't get rewritten and promoted.
1135 ; CHECK-LABEL: @PR13969(
1136 ; CHECK-NEXT: entry:
1137 ; CHECK-NEXT: ret double [[X:%.*]]
1142 %a = alloca double
1143 %b = alloca double*
1144 %c = alloca double
1145 store double %x, double* %a
1146 store double* %c, double** %b
1147 store double* %a, double** %b
1148 store double %x, double* %c
1149 %ret = load double, double* %a
1154 %PR14034.struct = type { { {} }, i32, %PR14034.list }
1155 %PR14034.list = type { %PR14034.list*, %PR14034.list* }
1157 define void @PR14034() {
1158 ; This test case tried to form GEPs into the empty leading struct members, and
1159 ; subsequently crashed (under valgrind) before we fixed the PR. The important
1160 ; thing is to handle empty structs gracefully.
1161 ; CHECK-LABEL: @PR14034(
1162 ; CHECK-NEXT: entry:
1163 ; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [12 x i8], align 8
1164 ; CHECK-NEXT: [[A_SROA_0_0_CAST1_SROA_IDX:%.*]] = getelementptr inbounds [12 x i8], [12 x i8]* [[A_SROA_0]], i64 0, i64 0
1165 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 undef, i8* align 8 [[A_SROA_0_0_CAST1_SROA_IDX]], i32 12, i1 false)
1166 ; CHECK-NEXT: ret void
1170 %a = alloca %PR14034.struct
1171 %list = getelementptr %PR14034.struct, %PR14034.struct* %a, i32 0, i32 2
1172 %prev = getelementptr %PR14034.list, %PR14034.list* %list, i32 0, i32 1
1173 store %PR14034.list* undef, %PR14034.list** %prev
1174 %cast0 = bitcast %PR14034.struct* undef to i8*
1175 %cast1 = bitcast %PR14034.struct* %a to i8*
1176 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast0, i8* %cast1, i32 12, i1 false)
1180 define i32 @test22(i32 %x) {
1181 ; Test that SROA and promotion are not confused by a grab bag mixture of pointer
1182 ; types involving wrapper aggregates and zero-length aggregate members.
1183 ; CHECK-LABEL: @test22(
1184 ; CHECK-NEXT: entry:
1185 ; CHECK-NEXT: [[WRAP1:%.*]] = insertvalue [1 x { i32 }] undef, i32 [[X:%.*]], 0, 0
1186 ; CHECK-NEXT: [[WRAP1_FCA_0_0_EXTRACT:%.*]] = extractvalue [1 x { i32 }] [[WRAP1]], 0, 0
1187 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 [[WRAP1_FCA_0_0_EXTRACT]] to float
1188 ; CHECK-NEXT: [[LOAD1_FCA_0_0_0_INSERT:%.*]] = insertvalue { [1 x { float }] } undef, float [[TMP0]], 0, 0, 0
1189 ; CHECK-NEXT: [[UNWRAP1:%.*]] = extractvalue { [1 x { float }] } [[LOAD1_FCA_0_0_0_INSERT]], 0, 0
1190 ; CHECK-NEXT: [[WRAP2:%.*]] = insertvalue { {}, { float }, [0 x i8] } undef, { float } [[UNWRAP1]], 1
1191 ; CHECK-NEXT: [[WRAP2_FCA_1_0_EXTRACT:%.*]] = extractvalue { {}, { float }, [0 x i8] } [[WRAP2]], 1, 0
1192 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast float [[WRAP2_FCA_1_0_EXTRACT]] to <4 x i8>
1193 ; CHECK-NEXT: [[VALCAST1:%.*]] = bitcast <4 x i8> [[TMP1]] to i32
1194 ; CHECK-NEXT: [[WRAP3:%.*]] = insertvalue [1 x [1 x i32]] undef, i32 [[VALCAST1]], 0, 0
1195 ; CHECK-NEXT: [[WRAP4:%.*]] = insertvalue { [1 x [1 x i32]], {} } undef, [1 x [1 x i32]] [[WRAP3]], 0
1196 ; CHECK-NEXT: [[WRAP4_FCA_0_0_0_EXTRACT:%.*]] = extractvalue { [1 x [1 x i32]], {} } [[WRAP4]], 0, 0, 0
1197 ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[WRAP4_FCA_0_0_0_EXTRACT]] to <4 x i8>
1198 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i8> [[TMP2]] to float
1199 ; CHECK-NEXT: [[LOAD4_FCA_1_INSERT:%.*]] = insertvalue { {}, float, {} } undef, float [[TMP3]], 1
1200 ; CHECK-NEXT: [[UNWRAP2:%.*]] = extractvalue { {}, float, {} } [[LOAD4_FCA_1_INSERT]], 1
1201 ; CHECK-NEXT: [[VALCAST2:%.*]] = bitcast float [[UNWRAP2]] to i32
1202 ; CHECK-NEXT: ret i32 [[VALCAST2]]
1206 %a1 = alloca { { [1 x { i32 }] } }
1207 %a2 = alloca { {}, { float }, [0 x i8] }
1208 %a3 = alloca { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }
1210 %wrap1 = insertvalue [1 x { i32 }] undef, i32 %x, 0, 0
1211 %gep1 = getelementptr { { [1 x { i32 }] } }, { { [1 x { i32 }] } }* %a1, i32 0, i32 0, i32 0
1212 store [1 x { i32 }] %wrap1, [1 x { i32 }]* %gep1
1214 %gep2 = getelementptr { { [1 x { i32 }] } }, { { [1 x { i32 }] } }* %a1, i32 0, i32 0
1215 %ptrcast1 = bitcast { [1 x { i32 }] }* %gep2 to { [1 x { float }] }*
1216 %load1 = load { [1 x { float }] }, { [1 x { float }] }* %ptrcast1
1217 %unwrap1 = extractvalue { [1 x { float }] } %load1, 0, 0
1219 %wrap2 = insertvalue { {}, { float }, [0 x i8] } undef, { float } %unwrap1, 1
1220 store { {}, { float }, [0 x i8] } %wrap2, { {}, { float }, [0 x i8] }* %a2
1222 %gep3 = getelementptr { {}, { float }, [0 x i8] }, { {}, { float }, [0 x i8] }* %a2, i32 0, i32 1, i32 0
1223 %ptrcast2 = bitcast float* %gep3 to <4 x i8>*
1224 %load3 = load <4 x i8>, <4 x i8>* %ptrcast2
1225 %valcast1 = bitcast <4 x i8> %load3 to i32
1227 %wrap3 = insertvalue [1 x [1 x i32]] undef, i32 %valcast1, 0, 0
1228 %wrap4 = insertvalue { [1 x [1 x i32]], {} } undef, [1 x [1 x i32]] %wrap3, 0
1229 %gep4 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }, { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1
1230 %ptrcast3 = bitcast { [0 x double], [1 x [1 x <4 x i8>]], {} }* %gep4 to { [1 x [1 x i32]], {} }*
1231 store { [1 x [1 x i32]], {} } %wrap4, { [1 x [1 x i32]], {} }* %ptrcast3
1233 %gep5 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }, { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1, i32 1, i32 0
1234 %ptrcast4 = bitcast [1 x <4 x i8>]* %gep5 to { {}, float, {} }*
1235 %load4 = load { {}, float, {} }, { {}, float, {} }* %ptrcast4
1236 %unwrap2 = extractvalue { {}, float, {} } %load4, 1
1237 %valcast2 = bitcast float %unwrap2 to i32
1242 define void @PR14059.1(double* %d) {
1243 ; In PR14059 a peculiar construct was identified as something that is used
1244 ; pervasively in ARM's ABI-calling-convention lowering: the passing of a struct
1245 ; of doubles via an array of i32 in order to place the data into integer
1246 ; registers. This in turn was missed as an optimization by SROA due to the
1247 ; partial loads and stores of integers to the double alloca we were trying to
1248 ; form and promote. The solution is to widen the integer operations to be
1249 ; whole-alloca operations, and perform the appropriate bitcasting on the
1250 ; *values* rather than the pointers. When this works, partial reads and writes
1251 ; via integers can be promoted away.
1252 ; CHECK-LABEL: @PR14059.1(
1253 ; CHECK-NEXT: entry:
1254 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast double undef to i64
1255 ; CHECK-NEXT: [[X_SROA_0_I_0_INSERT_MASK:%.*]] = and i64 [[TMP0]], -4294967296
1256 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[X_SROA_0_I_0_INSERT_MASK]] to double
1257 ; CHECK-NEXT: [[TMP2:%.*]] = bitcast double [[TMP1]] to i64
1258 ; CHECK-NEXT: [[X_SROA_0_I_2_INSERT_MASK:%.*]] = and i64 [[TMP2]], -281474976645121
1259 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i64 [[X_SROA_0_I_2_INSERT_MASK]] to double
1260 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[TMP3]] to i64
1261 ; CHECK-NEXT: [[X_SROA_0_I_4_D_RAW_SROA_CAST:%.*]] = bitcast double* [[D:%.*]] to i32*
1262 ; CHECK-NEXT: [[X_SROA_0_I_4_COPYLOAD:%.*]] = load i32, i32* [[X_SROA_0_I_4_D_RAW_SROA_CAST]], align 1
1263 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast double 0.000000e+00 to i64
1264 ; CHECK-NEXT: [[X_SROA_0_I_4_INSERT_EXT:%.*]] = zext i32 [[X_SROA_0_I_4_COPYLOAD]] to i64
1265 ; CHECK-NEXT: [[X_SROA_0_I_4_INSERT_SHIFT:%.*]] = shl i64 [[X_SROA_0_I_4_INSERT_EXT]], 32
1266 ; CHECK-NEXT: [[X_SROA_0_I_4_INSERT_MASK4:%.*]] = and i64 [[TMP5]], 4294967295
1267 ; CHECK-NEXT: [[X_SROA_0_I_4_INSERT_INSERT5:%.*]] = or i64 [[X_SROA_0_I_4_INSERT_MASK4]], [[X_SROA_0_I_4_INSERT_SHIFT]]
1268 ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i64 [[X_SROA_0_I_4_INSERT_INSERT5]] to double
1269 ; CHECK-NEXT: [[TMP7:%.*]] = bitcast double [[TMP6]] to i64
1270 ; CHECK-NEXT: [[X_SROA_0_I_4_INSERT_MASK:%.*]] = and i64 [[TMP7]], 4294967295
1271 ; CHECK-NEXT: [[X_SROA_0_I_4_INSERT_INSERT:%.*]] = or i64 [[X_SROA_0_I_4_INSERT_MASK]], 4607182418800017408
1272 ; CHECK-NEXT: [[TMP8:%.*]] = bitcast i64 [[X_SROA_0_I_4_INSERT_INSERT]] to double
1273 ; CHECK-NEXT: [[ACCUM_REAL_I:%.*]] = load double, double* [[D]], align 8
1274 ; CHECK-NEXT: [[ADD_R_I:%.*]] = fadd double [[ACCUM_REAL_I]], [[TMP8]]
1275 ; CHECK-NEXT: store double [[ADD_R_I]], double* [[D]], align 8
1276 ; CHECK-NEXT: ret void
1280 %X.sroa.0.i = alloca double, align 8
1281 %0 = bitcast double* %X.sroa.0.i to i8*
1282 call void @llvm.lifetime.start.p0i8(i64 -1, i8* %0)
1284 ; Store to the low 32-bits...
1285 %X.sroa.0.0.cast2.i = bitcast double* %X.sroa.0.i to i32*
1286 store i32 0, i32* %X.sroa.0.0.cast2.i, align 8
1288 ; Also use a memset to the middle 32-bits for fun.
1289 %X.sroa.0.2.raw_idx2.i = getelementptr inbounds i8, i8* %0, i32 2
1290 call void @llvm.memset.p0i8.i64(i8* %X.sroa.0.2.raw_idx2.i, i8 0, i64 4, i1 false)
1292 ; Or a memset of the whole thing.
1293 call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 8, i1 false)
1295 ; Write to the high 32-bits with a memcpy.
1296 %X.sroa.0.4.raw_idx4.i = getelementptr inbounds i8, i8* %0, i32 4
1297 %d.raw = bitcast double* %d to i8*
1298 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %X.sroa.0.4.raw_idx4.i, i8* %d.raw, i32 4, i1 false)
1300 ; Store to the high 32-bits...
1301 %X.sroa.0.4.cast5.i = bitcast i8* %X.sroa.0.4.raw_idx4.i to i32*
1302 store i32 1072693248, i32* %X.sroa.0.4.cast5.i, align 4
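; For reference: 1072693248 is 0x3FF00000, the high word of the IEEE-754 double
; 1.0. With the low bytes memset to zero, the whole-alloca value becomes
; 4607182418800017408 (0x3FF0000000000000) as seen in the CHECK lines, so the
; function ends up adding 1.0 to *%d.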
1304 ; Do the actual math...
1305 %X.sroa.0.0.load1.i = load double, double* %X.sroa.0.i, align 8
1306 %accum.real.i = load double, double* %d, align 8
1307 %add.r.i = fadd double %accum.real.i, %X.sroa.0.0.load1.i
1308 store double %add.r.i, double* %d, align 8
1309 call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0)
1313 define i64 @PR14059.2({ float, float }* %phi) {
1314 ; Check that SROA can split up alloca-wide integer loads and stores where the
1315 ; underlying alloca has smaller components that are accessed independently. This
1316 ; shows up particularly with ABI lowering patterns coming out of Clang that rely
1317 ; on the particular register placement of a single large integer return value.
1318 ; CHECK-LABEL: @PR14059.2(
1319 ; CHECK-NEXT: entry:
1320 ; CHECK-NEXT: [[PHI_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[PHI:%.*]], i32 0, i32 0
1321 ; CHECK-NEXT: [[PHI_REAL:%.*]] = load float, float* [[PHI_REALP]], align 4
1322 ; CHECK-NEXT: [[PHI_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[PHI]], i32 0, i32 1
1323 ; CHECK-NEXT: [[PHI_IMAG:%.*]] = load float, float* [[PHI_IMAGP]], align 4
1324 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[PHI_REAL]] to i32
1325 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast float [[PHI_IMAG]] to i32
1326 ; CHECK-NEXT: [[RETVAL_SROA_3_0_INSERT_EXT:%.*]] = zext i32 [[TMP1]] to i64
1327 ; CHECK-NEXT: [[RETVAL_SROA_3_0_INSERT_SHIFT:%.*]] = shl i64 [[RETVAL_SROA_3_0_INSERT_EXT]], 32
1328 ; CHECK-NEXT: [[RETVAL_SROA_3_0_INSERT_MASK:%.*]] = and i64 undef, 4294967295
1329 ; CHECK-NEXT: [[RETVAL_SROA_3_0_INSERT_INSERT:%.*]] = or i64 [[RETVAL_SROA_3_0_INSERT_MASK]], [[RETVAL_SROA_3_0_INSERT_SHIFT]]
1330 ; CHECK-NEXT: [[RETVAL_SROA_0_0_INSERT_EXT:%.*]] = zext i32 [[TMP0]] to i64
1331 ; CHECK-NEXT: [[RETVAL_SROA_0_0_INSERT_MASK:%.*]] = and i64 [[RETVAL_SROA_3_0_INSERT_INSERT]], -4294967296
1332 ; CHECK-NEXT: [[RETVAL_SROA_0_0_INSERT_INSERT:%.*]] = or i64 [[RETVAL_SROA_0_0_INSERT_MASK]], [[RETVAL_SROA_0_0_INSERT_EXT]]
1333 ; CHECK-NEXT: ret i64 [[RETVAL_SROA_0_0_INSERT_INSERT]]
1337 %retval = alloca { float, float }, align 4
1339 %0 = bitcast { float, float }* %retval to i64*
1340 store i64 0, i64* %0
1342 %phi.realp = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 0
1343 %phi.real = load float, float* %phi.realp
1344 %phi.imagp = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 1
1345 %phi.imag = load float, float* %phi.imagp
1347 %real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
1348 %imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
1349 store float %phi.real, float* %real
1350 store float %phi.imag, float* %imag
1352 %1 = load i64, i64* %0, align 1
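; For reference: the promoted form assembles the returned i64 entirely in SSA
; form, zexting the imaginary part's bits and shifting them into bits 32-63,
; then or'ing the real part's bits into bits 0-31, mirroring the in-memory
; layout of the { float, float } struct.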
1356 define void @PR14105({ [16 x i8] }* %ptr) {
1357 ; Ensure that when rewriting the GEP index '-1' for this alloca we preserve its
1358 ; sign as negative. We use a volatile memcpy to ensure promotion never actually occurs.
1360 ; CHECK-LABEL: @PR14105(
1361 ; CHECK-NEXT: entry:
1362 ; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [16 x i8], align 8
1363 ; CHECK-NEXT: [[A_SROA_0_0_CAST1_SROA_IDX:%.*]] = getelementptr inbounds { [16 x i8] }, { [16 x i8] }* [[PTR:%.*]], i64 -1, i32 0, i64 0
1364 ; CHECK-NEXT: [[A_SROA_0_0_CAST2_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_0]], i64 0, i64 0
1365 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[A_SROA_0_0_CAST1_SROA_IDX]], i8* align 8 [[A_SROA_0_0_CAST2_SROA_IDX]], i32 16, i1 true)
1366 ; CHECK-NEXT: ret void
1370 %a = alloca { [16 x i8] }, align 8
1372 %gep = getelementptr inbounds { [16 x i8] }, { [16 x i8] }* %ptr, i64 -1
1374 %cast1 = bitcast { [16 x i8 ] }* %gep to i8*
1375 %cast2 = bitcast { [16 x i8 ] }* %a to i8*
1376 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %cast1, i8* align 8 %cast2, i32 16, i1 true)
1380 define void @PR14105_as1({ [16 x i8] } addrspace(1)* %ptr) {
1381 ; Make sure the right address space pointer is used for the type check.
1382 ; CHECK-LABEL: @PR14105_as1(
1383 ; CHECK-NEXT: entry:
1384 ; CHECK-NEXT: [[A:%.*]] = alloca { [16 x i8] }, align 8
1385 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds { [16 x i8] }, { [16 x i8] } addrspace(1)* [[PTR:%.*]], i64 -1
1386 ; CHECK-NEXT: [[CAST1:%.*]] = bitcast { [16 x i8] } addrspace(1)* [[GEP]] to i8 addrspace(1)*
1387 ; CHECK-NEXT: [[CAST2:%.*]] = bitcast { [16 x i8] }* [[A]] to i8*
1388 ; CHECK-NEXT: call void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* align 8 [[CAST1]], i8* align 8 [[CAST2]], i32 16, i1 true)
1389 ; CHECK-NEXT: ret void
1393 %a = alloca { [16 x i8] }, align 8
1394 %gep = getelementptr inbounds { [16 x i8] }, { [16 x i8] } addrspace(1)* %ptr, i64 -1
1395 %cast1 = bitcast { [16 x i8 ] } addrspace(1)* %gep to i8 addrspace(1)*
1396 %cast2 = bitcast { [16 x i8 ] }* %a to i8*
1397 call void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* align 8 %cast1, i8* align 8 %cast2, i32 16, i1 true)
1401 define void @PR14465() {
1402 ; Ensure that we don't crash when analyzing an alloca larger than the maximum
1403 ; integer type width (MAX_INT_BITS) supported by llvm (1048576*32 > (1<<23)-1).
1404 ; CHECK-LABEL: @PR14465(
1405 ; CHECK-NEXT: [[STACK:%.*]] = alloca [1048576 x i32], align 16
1406 ; CHECK-NEXT: [[STACK_0_CAST_SROA_CAST:%.*]] = bitcast [1048576 x i32]* [[STACK]] to i8*
1407 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 16 [[STACK_0_CAST_SROA_CAST]], i8 -2, i64 4194304, i1 false)
1408 ; CHECK-NEXT: ret void
1411 %stack = alloca [1048576 x i32], align 16
1412 %cast = bitcast [1048576 x i32]* %stack to i8*
1413 call void @llvm.memset.p0i8.i64(i8* align 16 %cast, i8 -2, i64 4194304, i1 false)
1417 define void @PR14548(i1 %x) {
1418 ; Handle a mixture of i1 and i8 loads and stores to allocas. This particular
1419 ; pattern caused crashes and invalid output in the PR, and its nature will
1420 ; trigger a mixture in several permutations as we resolve each alloca in turn.
1422 ; Note that we don't do a particularly good *job* of handling these mixtures,
1423 ; but the hope is that this is very rare.
1424 ; CHECK-LABEL: @PR14548(
1425 ; CHECK-NEXT: entry:
1426 ; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca i8, align 8
1427 ; CHECK-NEXT: [[B_SROA_0:%.*]] = alloca i8, align 8
1428 ; CHECK-NEXT: [[B_SROA_0_0_B_I1_SROA_CAST1:%.*]] = bitcast i8* [[B_SROA_0]] to i1*
1429 ; CHECK-NEXT: store i1 [[X:%.*]], i1* [[B_SROA_0_0_B_I1_SROA_CAST1]], align 8
1430 ; CHECK-NEXT: [[B_SROA_0_0_B_SROA_0_0_FOO:%.*]] = load i8, i8* [[B_SROA_0]], align 8
1431 ; CHECK-NEXT: [[B_SROA_0_0_B_SROA_0_0_COPYLOAD:%.*]] = load i8, i8* [[B_SROA_0]], align 8
1432 ; CHECK-NEXT: store i8 [[B_SROA_0_0_B_SROA_0_0_COPYLOAD]], i8* [[A_SROA_0]], align 8
1433 ; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_BAR:%.*]] = load i8, i8* [[A_SROA_0]], align 8
1434 ; CHECK-NEXT: [[A_SROA_0_0_A_I1_SROA_CAST2:%.*]] = bitcast i8* [[A_SROA_0]] to i1*
1435 ; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_BAZ:%.*]] = load i1, i1* [[A_SROA_0_0_A_I1_SROA_CAST2]], align 8
1436 ; CHECK-NEXT: ret void
1440 %a = alloca <{ i1 }>, align 8
1441 %b = alloca <{ i1 }>, align 8
1443 %b.i1 = bitcast <{ i1 }>* %b to i1*
1444 store i1 %x, i1* %b.i1, align 8
1445 %b.i8 = bitcast <{ i1 }>* %b to i8*
1446 %foo = load i8, i8* %b.i8, align 1
1448 %a.i8 = bitcast <{ i1 }>* %a to i8*
1449 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.i8, i8* %b.i8, i32 1, i1 false) nounwind
1450 %bar = load i8, i8* %a.i8, align 1
1451 %a.i1 = getelementptr inbounds <{ i1 }>, <{ i1 }>* %a, i32 0, i32 0
1452 %baz = load i1, i1* %a.i1, align 1
1457 define <3 x i8> @PR14572.1(i32 %x) {
1458 ; Ensure that a split integer store which is wider than the type size of the
1459 ; alloca (relying on the alloc size padding) doesn't trigger an assert.
1460 ; CHECK-LABEL: @PR14572.1(
1461 ; CHECK-NEXT: entry:
1462 ; CHECK-NEXT: [[A_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[X:%.*]] to i24
1463 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast i24 [[A_0_EXTRACT_TRUNC]] to <3 x i8>
1464 ; CHECK-NEXT: [[A_SROA_2_0_EXTRACT_SHIFT:%.*]] = lshr i32 [[X]], 24
1465 ; CHECK-NEXT: [[A_SROA_2_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_2_0_EXTRACT_SHIFT]] to i8
1466 ; CHECK-NEXT: ret <3 x i8> [[TMP0]]
1470 %a = alloca <3 x i8>, align 4
1472 %cast = bitcast <3 x i8>* %a to i32*
1473 store i32 %x, i32* %cast, align 1
1474 %y = load <3 x i8>, <3 x i8>* %a, align 4
1478 define i32 @PR14572.2(<3 x i8> %x) {
1479 ; Ensure that a split integer load which is wider than the type size of the
1480 ; alloca (relying on the alloc size padding) doesn't trigger an assert.
1481 ; CHECK-LABEL: @PR14572.2(
1482 ; CHECK-NEXT: entry:
1483 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast <3 x i8> [[X:%.*]] to i24
1484 ; CHECK-NEXT: [[A_SROA_2_0_INSERT_EXT:%.*]] = zext i8 undef to i32
1485 ; CHECK-NEXT: [[A_SROA_2_0_INSERT_SHIFT:%.*]] = shl i32 [[A_SROA_2_0_INSERT_EXT]], 24
1486 ; CHECK-NEXT: [[A_SROA_2_0_INSERT_MASK:%.*]] = and i32 undef, 16777215
1487 ; CHECK-NEXT: [[A_SROA_2_0_INSERT_INSERT:%.*]] = or i32 [[A_SROA_2_0_INSERT_MASK]], [[A_SROA_2_0_INSERT_SHIFT]]
1488 ; CHECK-NEXT: [[A_0_INSERT_EXT:%.*]] = zext i24 [[TMP0]] to i32
1489 ; CHECK-NEXT: [[A_0_INSERT_MASK:%.*]] = and i32 [[A_SROA_2_0_INSERT_INSERT]], -16777216
1490 ; CHECK-NEXT: [[A_0_INSERT_INSERT:%.*]] = or i32 [[A_0_INSERT_MASK]], [[A_0_INSERT_EXT]]
1491 ; CHECK-NEXT: ret i32 [[A_0_INSERT_INSERT]]
1495 %a = alloca <3 x i8>, align 4
1497 store <3 x i8> %x, <3 x i8>* %a, align 1
1498 %cast = bitcast <3 x i8>* %a to i32*
1499 %y = load i32, i32* %cast, align 4
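; For reference: <3 x i8> stores 3 bytes but its alloc size here is 4, so the
; wide i32 load also reads one padding byte; the CHECK lines rebuild the value
; from a zext of an undef i8 shifted into bits 24-31 on top of the low 24 bits
; kept by the 16777215 (0xFFFFFF) mask.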
1503 define i32 @PR14601(i32 %x) {
1504 ; Don't try to form a promotable integer alloca when there is a variable-length
1505 ; memory intrinsic operating on it.
1506 ; CHECK-LABEL: @PR14601(
1507 ; CHECK-NEXT: entry:
1508 ; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
1509 ; CHECK-NEXT: [[A_0_A_I8_SROA_CAST:%.*]] = bitcast i32* [[A]] to i8*
1510 ; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 4 [[A_0_A_I8_SROA_CAST]], i8 0, i32 [[X:%.*]], i1 false)
1511 ; CHECK-NEXT: [[A_0_V:%.*]] = load i32, i32* [[A]], align 4
1512 ; CHECK-NEXT: ret i32 [[A_0_V]]
1517 %a = alloca i32
1518 %a.i8 = bitcast i32* %a to i8*
1519 call void @llvm.memset.p0i8.i32(i8* %a.i8, i8 0, i32 %x, i1 false)
1520 %v = load i32, i32* %a
1524 define void @PR15674(i8* %data, i8* %src, i32 %size) {
1525 ; Arrange (via control flow) to have unmerged stores of a particular width to
1526 ; an alloca where we incrementally store from the end of the array toward the
1527 ; beginning of the array. Ensure that the final integer store, despite being
1528 ; convertible to the integer type that we end up promoting this alloca toward,
1529 ; doesn't get widened to a full alloca store.
1530 ; CHECK-LABEL: @PR15674(
1531 ; CHECK-NEXT: entry:
1532 ; CHECK-NEXT: [[TMP_SROA_0:%.*]] = alloca i32, align 4
1533 ; CHECK-NEXT: switch i32 [[SIZE:%.*]], label [[END:%.*]] [
1534 ; CHECK-NEXT: i32 4, label [[BB4:%.*]]
1535 ; CHECK-NEXT: i32 3, label [[BB3:%.*]]
1536 ; CHECK-NEXT: i32 2, label [[BB2:%.*]]
1537 ; CHECK-NEXT: i32 1, label [[BB1:%.*]]
1540 ; CHECK-NEXT: [[SRC_GEP3:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i32 3
1541 ; CHECK-NEXT: [[SRC_3:%.*]] = load i8, i8* [[SRC_GEP3]], align 1
1542 ; CHECK-NEXT: [[TMP_SROA_0_3_TMP_GEP3_SROA_RAW_CAST7:%.*]] = bitcast i32* [[TMP_SROA_0]] to i8*
1543 ; CHECK-NEXT: [[TMP_SROA_0_3_TMP_GEP3_SROA_RAW_IDX8:%.*]] = getelementptr inbounds i8, i8* [[TMP_SROA_0_3_TMP_GEP3_SROA_RAW_CAST7]], i64 3
1544 ; CHECK-NEXT: store i8 [[SRC_3]], i8* [[TMP_SROA_0_3_TMP_GEP3_SROA_RAW_IDX8]], align 1
1545 ; CHECK-NEXT: br label [[BB3]]
1547 ; CHECK-NEXT: [[SRC_GEP2:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i32 2
1548 ; CHECK-NEXT: [[SRC_2:%.*]] = load i8, i8* [[SRC_GEP2]], align 1
1549 ; CHECK-NEXT: [[TMP_SROA_0_2_TMP_GEP2_SROA_RAW_CAST5:%.*]] = bitcast i32* [[TMP_SROA_0]] to i8*
1550 ; CHECK-NEXT: [[TMP_SROA_0_2_TMP_GEP2_SROA_RAW_IDX6:%.*]] = getelementptr inbounds i8, i8* [[TMP_SROA_0_2_TMP_GEP2_SROA_RAW_CAST5]], i64 2
1551 ; CHECK-NEXT: store i8 [[SRC_2]], i8* [[TMP_SROA_0_2_TMP_GEP2_SROA_RAW_IDX6]], align 2
1552 ; CHECK-NEXT: br label [[BB2]]
1554 ; CHECK-NEXT: [[SRC_GEP1:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i32 1
1555 ; CHECK-NEXT: [[SRC_1:%.*]] = load i8, i8* [[SRC_GEP1]], align 1
1556 ; CHECK-NEXT: [[TMP_SROA_0_1_TMP_GEP1_SROA_RAW_CAST3:%.*]] = bitcast i32* [[TMP_SROA_0]] to i8*
1557 ; CHECK-NEXT: [[TMP_SROA_0_1_TMP_GEP1_SROA_RAW_IDX4:%.*]] = getelementptr inbounds i8, i8* [[TMP_SROA_0_1_TMP_GEP1_SROA_RAW_CAST3]], i64 1
1558 ; CHECK-NEXT: store i8 [[SRC_1]], i8* [[TMP_SROA_0_1_TMP_GEP1_SROA_RAW_IDX4]], align 1
1559 ; CHECK-NEXT: br label [[BB1]]
1561 ; CHECK-NEXT: [[SRC_GEP0:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i32 0
1562 ; CHECK-NEXT: [[SRC_0:%.*]] = load i8, i8* [[SRC_GEP0]], align 1
1563 ; CHECK-NEXT: [[TMP_SROA_0_0_TMP_GEP0_SROA_CAST2:%.*]] = bitcast i32* [[TMP_SROA_0]] to i8*
1564 ; CHECK-NEXT: store i8 [[SRC_0]], i8* [[TMP_SROA_0_0_TMP_GEP0_SROA_CAST2]], align 4
1565 ; CHECK-NEXT: br label [[END]]
1567 ; CHECK-NEXT: [[TMP_SROA_0_0_TMP_RAW_SROA_CAST1:%.*]] = bitcast i32* [[TMP_SROA_0]] to i8*
1568 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[DATA:%.*]], i8* align 4 [[TMP_SROA_0_0_TMP_RAW_SROA_CAST1]], i32 [[SIZE]], i1 false)
1569 ; CHECK-NEXT: ret void
1573 %tmp = alloca [4 x i8], align 1
1575 switch i32 %size, label %end [
1576 i32 4, label %bb4
1577 i32 3, label %bb3
1578 i32 2, label %bb2
1579 i32 1, label %bb1
1580 ]
1583 %src.gep3 = getelementptr inbounds i8, i8* %src, i32 3
1584 %src.3 = load i8, i8* %src.gep3
1585 %tmp.gep3 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 3
1586 store i8 %src.3, i8* %tmp.gep3
1591 %src.gep2 = getelementptr inbounds i8, i8* %src, i32 2
1592 %src.2 = load i8, i8* %src.gep2
1593 %tmp.gep2 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 2
1594 store i8 %src.2, i8* %tmp.gep2
1599 %src.gep1 = getelementptr inbounds i8, i8* %src, i32 1
1600 %src.1 = load i8, i8* %src.gep1
1601 %tmp.gep1 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 1
1602 store i8 %src.1, i8* %tmp.gep1
1607 %src.gep0 = getelementptr inbounds i8, i8* %src, i32 0
1608 %src.0 = load i8, i8* %src.gep0
1609 %tmp.gep0 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 0
1610 store i8 %src.0, i8* %tmp.gep0
1615 %tmp.raw = bitcast [4 x i8]* %tmp to i8*
1616 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %data, i8* %tmp.raw, i32 %size, i1 false)
1620 define void @PR15805(i1 %a, i1 %b) {
1621 ; CHECK-LABEL: @PR15805(
1622 ; CHECK-NEXT: [[COND_SROA_SPECULATED:%.*]] = select i1 undef, i64 undef, i64 undef
1623 ; CHECK-NEXT: ret void
1626 %c = alloca i64, align 8
1627 %p.0.c = select i1 undef, i64* %c, i64* %c
1628 %cond.in = select i1 undef, i64* %p.0.c, i64* %c
1629 %cond = load i64, i64* %cond.in, align 8
1633 define void @PR15805.1(i1 %a, i1 %b) {
1634 ; Same as the normal PR15805, but rigged to place the use before the def inside
1635 ; of looping unreachable code. This helps ensure that we aren't sensitive to the
1636 ; order in which the uses of the alloca are visited.
1638 ; CHECK-LABEL: @PR15805.1(
1639 ; CHECK-NEXT: br label [[EXIT:%.*]]
1641 ; CHECK-NEXT: [[COND_SROA_SPECULATED:%.*]] = select i1 undef, i64 undef, i64 undef
1642 ; CHECK-NEXT: br i1 undef, label [[LOOP:%.*]], label [[EXIT]]
1644 ; CHECK-NEXT: ret void
1647 %c = alloca i64, align 8
1651 %cond.in = select i1 undef, i64* %c, i64* %p.0.c
1652 %p.0.c = select i1 undef, i64* %c, i64* %c
1653 %cond = load i64, i64* %cond.in, align 8
1654 br i1 undef, label %loop, label %exit
1660 define void @PR16651.1(i8* %a) {
1661 ; This test case caused a crash due to the volatile memcpy in combination with
1662 ; lowering to integer loads and stores of a width other than that of the original
1663 ; memcpy.
1665 ; CHECK-LABEL: @PR16651.1(
1666 ; CHECK-NEXT: entry:
1667 ; CHECK-NEXT: [[B_SROA_0:%.*]] = alloca i16, align 4
1668 ; CHECK-NEXT: [[B_SROA_1:%.*]] = alloca i8, align 2
1669 ; CHECK-NEXT: [[B_SROA_2:%.*]] = alloca i8, align 1
1670 ; CHECK-NEXT: [[B_SROA_0_0_A_SROA_CAST:%.*]] = bitcast i8* [[A:%.*]] to i16*
1671 ; CHECK-NEXT: [[B_SROA_0_0_COPYLOAD:%.*]] = load volatile i16, i16* [[B_SROA_0_0_A_SROA_CAST]], align 4
1672 ; CHECK-NEXT: store volatile i16 [[B_SROA_0_0_COPYLOAD]], i16* [[B_SROA_0]], align 4
1673 ; CHECK-NEXT: [[B_SROA_1_0_A_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[A]], i64 2
1674 ; CHECK-NEXT: [[B_SROA_1_0_COPYLOAD:%.*]] = load volatile i8, i8* [[B_SROA_1_0_A_SROA_RAW_IDX]], align 2
1675 ; CHECK-NEXT: store volatile i8 [[B_SROA_1_0_COPYLOAD]], i8* [[B_SROA_1]], align 2
1676 ; CHECK-NEXT: [[B_SROA_2_0_A_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[A]], i64 3
1677 ; CHECK-NEXT: [[B_SROA_2_0_COPYLOAD:%.*]] = load volatile i8, i8* [[B_SROA_2_0_A_SROA_RAW_IDX]], align 1
1678 ; CHECK-NEXT: store volatile i8 [[B_SROA_2_0_COPYLOAD]], i8* [[B_SROA_2]], align 1
1679 ; CHECK-NEXT: [[B_SROA_1_0_B_SROA_1_2_:%.*]] = load i8, i8* [[B_SROA_1]], align 2
1680 ; CHECK-NEXT: unreachable
1684 %b = alloca i32, align 4
1685 %b.cast = bitcast i32* %b to i8*
1686 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %b.cast, i8* align 4 %a, i32 4, i1 true)
1687 %b.gep = getelementptr inbounds i8, i8* %b.cast, i32 2
1688 load i8, i8* %b.gep, align 2
1692 define void @PR16651.2() {
1693 ; This test case caused a crash due to failing to promote given a select that
1694 ; can't be speculated. It shouldn't be promoted, but we missed that fact when
1695 ; analyzing whether we could form a vector promotion because that code didn't
1696 ; bail on select instructions.
1698 ; CHECK-LABEL: @PR16651.2(
1699 ; CHECK-NEXT: entry:
1700 ; CHECK-NEXT: [[TV1_SROA_0:%.*]] = alloca <2 x float>, align 8
1701 ; CHECK-NEXT: store <2 x float> undef, <2 x float>* [[TV1_SROA_0]], align 8
1702 ; CHECK-NEXT: [[TV1_SROA_0_0__SROA_IDX:%.*]] = getelementptr inbounds <2 x float>, <2 x float>* [[TV1_SROA_0]], i64 0, i32 0
1703 ; CHECK-NEXT: [[COND105_IN_I_I:%.*]] = select i1 undef, float* null, float* [[TV1_SROA_0_0__SROA_IDX]]
1704 ; CHECK-NEXT: [[COND105_I_I:%.*]] = load float, float* [[COND105_IN_I_I]], align 8
1705 ; CHECK-NEXT: ret void
1709 %tv1 = alloca { <2 x float>, <2 x float> }, align 8
1710 %0 = getelementptr { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1
1711 store <2 x float> undef, <2 x float>* %0, align 8
1712 %1 = getelementptr inbounds { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1, i64 0
1713 %cond105.in.i.i = select i1 undef, float* null, float* %1
1714 %cond105.i.i = load float, float* %cond105.in.i.i, align 8
1718 define void @test23(i32 %x) {
1719 ; CHECK-LABEL: @test23(
1720 ; CHECK-NEXT: entry:
1721 ; CHECK-NEXT: ret void
1724 %a = alloca i32, align 4
1725 store i32 %x, i32* %a, align 4
1726 %gep1 = getelementptr inbounds i32, i32* %a, i32 1
1727 %gep0 = getelementptr inbounds i32, i32* %a, i32 0
1728 %cast1 = bitcast i32* %gep1 to i8*
1729 %cast0 = bitcast i32* %gep0 to i8*
1730 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast1, i8* %cast0, i32 4, i1 false)
1734 define void @PR18615() {
1735 ; CHECK-LABEL: @PR18615(
1736 ; CHECK-NEXT: entry:
1737 ; CHECK-NEXT: ret void
1740 %f = alloca i8
1741 %gep = getelementptr i8, i8* %f, i64 -1
1742 call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* %gep, i32 1, i1 false)
1746 define void @test24(i8* %src, i8* %dst) {
1747 ; CHECK-LABEL: @test24(
1748 ; CHECK-NEXT: entry:
1749 ; CHECK-NEXT: [[A:%.*]] = alloca i64, align 16
1750 ; CHECK-NEXT: [[A_0_SRC_SROA_CAST:%.*]] = bitcast i8* [[SRC:%.*]] to i64*
1751 ; CHECK-NEXT: [[A_0_COPYLOAD:%.*]] = load volatile i64, i64* [[A_0_SRC_SROA_CAST]], align 1, !tbaa [[TBAA0]]
1752 ; CHECK-NEXT: store volatile i64 [[A_0_COPYLOAD]], i64* [[A]], align 16, !tbaa [[TBAA0]]
1753 ; CHECK-NEXT: [[A_0_DST_SROA_CAST:%.*]] = bitcast i8* [[DST:%.*]] to i64*
1754 ; CHECK-NEXT: [[A_0_COPYLOAD1:%.*]] = load volatile i64, i64* [[A]], align 16, !tbaa [[TBAA3]]
1755 ; CHECK-NEXT: store volatile i64 [[A_0_COPYLOAD1]], i64* [[A_0_DST_SROA_CAST]], align 1, !tbaa [[TBAA3]]
1756 ; CHECK-NEXT: ret void
1760 %a = alloca i64, align 16
1761 %ptr = bitcast i64* %a to i8*
1762 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 8, i1 true), !tbaa !0
1763 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 8, i1 true), !tbaa !3
1767 define float @test25() {
1768 ; Check that we split up stores in order to promote the smaller SSA values. These types
1769 ; of patterns can arise because LLVM maps small memcpy's to integer load and
1770 ; stores. If we get a memcpy of an aggregate (such as C and C++ frontends would
1771 ; produce, but so might any language frontend), this will in many cases turn into
1772 ; an integer load and store. SROA needs to be extremely powerful to correctly
1773 ; handle these cases and form splittable and promotable SSA values.
1775 ; CHECK-LABEL: @test25(
1776 ; CHECK-NEXT: entry:
1777 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 0 to float
1778 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 1065353216 to float
1779 ; CHECK-NEXT: [[RET:%.*]] = fadd float [[TMP0]], [[TMP1]]
1780 ; CHECK-NEXT: ret float [[RET]]
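; For reference: after the i64 round trip through %b, the two halves come back
; as the raw bit patterns in the CHECK lines above: 0 is float 0.0 and
; 1065353216 (0x3F800000) is float 1.0.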
1784 %a = alloca i64
1785 %b = alloca i64
1786 %a.cast = bitcast i64* %a to [2 x float]*
1787 %a.gep1 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 0
1788 %a.gep2 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 1
1789 %b.cast = bitcast i64* %b to [2 x float]*
1790 %b.gep1 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 0
1791 %b.gep2 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 1
1792 store float 0.0, float* %a.gep1
1793 store float 1.0, float* %a.gep2
1794 %v = load i64, i64* %a
1795 store i64 %v, i64* %b
1796 %f1 = load float, float* %b.gep1
1797 %f2 = load float, float* %b.gep2
1798 %ret = fadd float %f1, %f2
1802 @complex1 = external global [2 x float]
1803 @complex2 = external global [2 x float]
1805 define void @test26() {
1806 ; Test a case of splitting up loads and stores against globals.
1808 ; CHECK-LABEL: @test26(
1809 ; CHECK-NEXT: entry:
1810 ; CHECK-NEXT: [[V14:%.*]] = load i32, i32* bitcast ([2 x float]* @complex1 to i32*), align 4
1811 ; CHECK-NEXT: [[V16:%.*]] = load i32, i32* bitcast (float* getelementptr inbounds ([2 x float], [2 x float]* @complex1, i64 0, i64 1) to i32*), align 4
1812 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 [[V14]] to float
1813 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[V16]] to float
1814 ; CHECK-NEXT: [[SUM:%.*]] = fadd float [[TMP0]], [[TMP1]]
1815 ; CHECK-NEXT: [[TMP2:%.*]] = bitcast float [[SUM]] to i32
1816 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[SUM]] to i32
1817 ; CHECK-NEXT: store i32 [[TMP2]], i32* bitcast ([2 x float]* @complex2 to i32*), align 4
1818 ; CHECK-NEXT: store i32 [[TMP3]], i32* bitcast (float* getelementptr inbounds ([2 x float], [2 x float]* @complex2, i64 0, i64 1) to i32*), align 4
1819 ; CHECK-NEXT: ret void
1823 %a = alloca i64
1824 %a.cast = bitcast i64* %a to [2 x float]*
1825 %a.gep1 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 0
1826 %a.gep2 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 1
1827 %v1 = load i64, i64* bitcast ([2 x float]* @complex1 to i64*)
1828 store i64 %v1, i64* %a
1829 %f1 = load float, float* %a.gep1
1830 %f2 = load float, float* %a.gep2
1831 %sum = fadd float %f1, %f2
1832 store float %sum, float* %a.gep1
1833 store float %sum, float* %a.gep2
1834 %v2 = load i64, i64* %a
1835 store i64 %v2, i64* bitcast ([2 x float]* @complex2 to i64*)
1839 define float @test27() {
1840 ; Another, more complex case of splittable i64 loads and stores. This example
1841 ; is a particularly challenging one because the load and store both point into
1842 ; the alloca SROA is processing, and they overlap but at an offset.
1844 ; CHECK-LABEL: @test27(
1845 ; CHECK-NEXT: entry:
1846 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 0 to float
1847 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 1065353216 to float
1848 ; CHECK-NEXT: [[RET:%.*]] = fadd float [[TMP0]], [[TMP1]]
1849 ; CHECK-NEXT: ret float [[RET]]
1853 %a = alloca [12 x i8]
1854 %gep1 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 0
1855 %gep2 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 4
1856 %gep3 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 8
1857 %iptr1 = bitcast i8* %gep1 to i64*
1858 %iptr2 = bitcast i8* %gep2 to i64*
1859 %fptr1 = bitcast i8* %gep1 to float*
1860 %fptr2 = bitcast i8* %gep2 to float*
1861 %fptr3 = bitcast i8* %gep3 to float*
1862 store float 0.0, float* %fptr1
1863 store float 1.0, float* %fptr2
1864 %v = load i64, i64* %iptr1
1865 store i64 %v, i64* %iptr2
1866 %f1 = load float, float* %fptr2
1867 %f2 = load float, float* %fptr3
1868 %ret = fadd float %f1, %f2
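; Worked layout for reference: the stores put 0.0 in bytes 0-3 and 1.0 in bytes
; 4-7 of the 12-byte alloca; the i64 load reads bytes 0-7 and the store rewrites
; bytes 4-11, so %f1 reloads 0.0, %f2 reloads 1.0, and the promoted form folds
; to the two bitcast constants in the CHECK lines.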
1872 define i32 @PR22093() {
1873 ; Test that we don't try to pre-split a splittable store of a splittable but
1874 ; not pre-splittable load over the same alloca. We "handle" this case when the
1875 ; load is unsplittable but unrelated to this alloca by just generating extra
1876 ; loads without touching the original, but when the original load was out of
1877 ; this alloca we need to handle it specially to ensure the splits line up
1878 ; properly for rewriting.
1880 ; CHECK-LABEL: @PR22093(
1881 ; CHECK-NEXT: entry:
1882 ; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca i16, align 4
1883 ; CHECK-NEXT: store volatile i16 42, i16* [[A_SROA_0]], align 4
1884 ; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_LOAD:%.*]] = load i16, i16* [[A_SROA_0]], align 4
1885 ; CHECK-NEXT: [[A_SROA_3_0_INSERT_EXT:%.*]] = zext i16 undef to i32
1886 ; CHECK-NEXT: [[A_SROA_3_0_INSERT_SHIFT:%.*]] = shl i32 [[A_SROA_3_0_INSERT_EXT]], 16
1887 ; CHECK-NEXT: [[A_SROA_3_0_INSERT_MASK:%.*]] = and i32 undef, 65535
1888 ; CHECK-NEXT: [[A_SROA_3_0_INSERT_INSERT:%.*]] = or i32 [[A_SROA_3_0_INSERT_MASK]], [[A_SROA_3_0_INSERT_SHIFT]]
1889 ; CHECK-NEXT: [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i16 [[A_SROA_0_0_A_SROA_0_0_LOAD]] to i32
1890 ; CHECK-NEXT: [[A_SROA_0_0_INSERT_MASK:%.*]] = and i32 [[A_SROA_3_0_INSERT_INSERT]], -65536
1891 ; CHECK-NEXT: [[A_SROA_0_0_INSERT_INSERT:%.*]] = or i32 [[A_SROA_0_0_INSERT_MASK]], [[A_SROA_0_0_INSERT_EXT]]
1892 ; CHECK-NEXT: [[A_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_0_0_INSERT_INSERT]] to i16
1893 ; CHECK-NEXT: store i16 [[A_SROA_0_0_EXTRACT_TRUNC]], i16* [[A_SROA_0]], align 4
1894 ; CHECK-NEXT: [[A_SROA_3_0_EXTRACT_SHIFT:%.*]] = lshr i32 [[A_SROA_0_0_INSERT_INSERT]], 16
1895 ; CHECK-NEXT: [[A_SROA_3_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_3_0_EXTRACT_SHIFT]] to i16
1896 ; CHECK-NEXT: ret i32 [[A_SROA_0_0_INSERT_INSERT]]
1900 %a = alloca i32
1901 %a.cast = bitcast i32* %a to i16*
1902 store volatile i16 42, i16* %a.cast
1903 %load = load i32, i32* %a
1904 store i32 %load, i32* %a
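; For reference: in the promoted form above, 'and ... 65535' (0xFFFF) keeps the
; low 16 bits, 'and ... -65536' (0xFFFF0000) keeps the high 16 bits, and the
; shl/lshr-by-16 pairs move the upper half in and out of the 32-bit value.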
1908 define void @PR22093.2() {
1909 ; Another way that we end up being unable to split a particular set of loads
1910 ; and stores can even have ordering importance. Here we have a load which is
1911 ; pre-splittable by itself, and the first store is also compatible. But the
1912 ; second store of the load makes the load unsplittable because of a mismatch of
1913 ; splits. Because this makes the load unsplittable, we also have to go back and
1914 ; remove the first store from the presplit candidates as its load won't be
1915 ; pre-split.
1917 ; CHECK-LABEL: @PR22093.2(
1918 ; CHECK-NEXT: entry:
1919 ; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca i16, align 8
1920 ; CHECK-NEXT: [[A_SROA_31:%.*]] = alloca i8, align 4
1921 ; CHECK-NEXT: store volatile i16 42, i16* [[A_SROA_0]], align 8
1922 ; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_LOAD:%.*]] = load i16, i16* [[A_SROA_0]], align 8
1923 ; CHECK-NEXT: [[A_SROA_3_0_INSERT_EXT:%.*]] = zext i16 undef to i32
1924 ; CHECK-NEXT: [[A_SROA_3_0_INSERT_SHIFT:%.*]] = shl i32 [[A_SROA_3_0_INSERT_EXT]], 16
1925 ; CHECK-NEXT: [[A_SROA_3_0_INSERT_MASK:%.*]] = and i32 undef, 65535
1926 ; CHECK-NEXT: [[A_SROA_3_0_INSERT_INSERT:%.*]] = or i32 [[A_SROA_3_0_INSERT_MASK]], [[A_SROA_3_0_INSERT_SHIFT]]
1927 ; CHECK-NEXT: [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i16 [[A_SROA_0_0_A_SROA_0_0_LOAD]] to i32
1928 ; CHECK-NEXT: [[A_SROA_0_0_INSERT_MASK:%.*]] = and i32 [[A_SROA_3_0_INSERT_INSERT]], -65536
1929 ; CHECK-NEXT: [[A_SROA_0_0_INSERT_INSERT:%.*]] = or i32 [[A_SROA_0_0_INSERT_MASK]], [[A_SROA_0_0_INSERT_EXT]]
1930 ; CHECK-NEXT: [[A_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_0_0_INSERT_INSERT]] to i16
1931 ; CHECK-NEXT: store i16 [[A_SROA_0_0_EXTRACT_TRUNC]], i16* [[A_SROA_0]], align 8
1932 ; CHECK-NEXT: [[A_SROA_3_0_EXTRACT_SHIFT:%.*]] = lshr i32 [[A_SROA_0_0_INSERT_INSERT]], 16
1933 ; CHECK-NEXT: [[A_SROA_3_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_3_0_EXTRACT_SHIFT]] to i16
1934 ; CHECK-NEXT: store volatile i8 13, i8* [[A_SROA_31]], align 4
1935 ; CHECK-NEXT: [[A_SROA_31_4_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_0_0_INSERT_INSERT]] to i8
1936 ; CHECK-NEXT: store i8 [[A_SROA_31_4_EXTRACT_TRUNC]], i8* [[A_SROA_31]], align 4
1937 ; CHECK-NEXT: [[A_SROA_5_4_EXTRACT_SHIFT:%.*]] = lshr i32 [[A_SROA_0_0_INSERT_INSERT]], 8
1938 ; CHECK-NEXT: [[A_SROA_5_4_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_5_4_EXTRACT_SHIFT]] to i24
1939 ; CHECK-NEXT: ret void
1943 %a = alloca i64
1944 %a.cast1 = bitcast i64* %a to i32*
1945 %a.cast2 = bitcast i64* %a to i16*
1946 store volatile i16 42, i16* %a.cast2
1947 %load = load i32, i32* %a.cast1
1948 store i32 %load, i32* %a.cast1
1949 %a.gep1 = getelementptr i32, i32* %a.cast1, i32 1
1950 %a.cast3 = bitcast i32* %a.gep1 to i8*
1951 store volatile i8 13, i8* %a.cast3
1952 store i32 %load, i32* %a.gep1
1956 define void @PR23737() {
1957 ; CHECK-LABEL: @PR23737(
1958 ; CHECK-NEXT: entry:
1959 ; CHECK-NEXT: [[PTR:%.*]] = alloca i64, align 8
1960 ; CHECK-NEXT: store atomic volatile i64 0, i64* [[PTR]] seq_cst, align 8
1961 ; CHECK-NEXT: [[PTR_0_LOAD:%.*]] = load atomic volatile i64, i64* [[PTR]] seq_cst, align 8
1962 ; CHECK-NEXT: ret void
1965 %ptr = alloca i64, align 8
1966 store atomic volatile i64 0, i64* %ptr seq_cst, align 8
1967 %load = load atomic volatile i64, i64* %ptr seq_cst, align 8
1971 define i16 @PR24463() {
1972 ; Ensure we can handle a very interesting case where there is an integer-based
1973 ; rewrite of the uses of the alloca, but where one of the integers in that is
1974 ; a sub-integer that requires extraction *and* extends past the end of the
1975 ; alloca. SROA can split the alloca to avoid shift or trunc.
1977 ; CHECK-LABEL: @PR24463(
1978 ; CHECK-NEXT: entry:
1979 ; CHECK-NEXT: [[ALLOCA_SROA_1_2_LOAD_EXT:%.*]] = zext i8 0 to i16
1980 ; CHECK-NEXT: ret i16 [[ALLOCA_SROA_1_2_LOAD_EXT]]
1983 %alloca = alloca [3 x i8]
1984 %gep1 = getelementptr inbounds [3 x i8], [3 x i8]* %alloca, i64 0, i64 1
1985 %bc1 = bitcast i8* %gep1 to i16*
1986 store i16 0, i16* %bc1
1987 %gep2 = getelementptr inbounds [3 x i8], [3 x i8]* %alloca, i64 0, i64 2
1988 %bc2 = bitcast i8* %gep2 to i16*
1989 %load = load i16, i16* %bc2
1993 %struct.STest = type { %struct.SPos, %struct.SPos }
1994 %struct.SPos = type { float, float }
1996 define void @PR25873(%struct.STest* %outData) {
1997 ; CHECK-LABEL: @PR25873(
1998 ; CHECK-NEXT: entry:
1999 ; CHECK-NEXT: [[TMPDATA_SROA_0_0__SROA_CAST1_SROA_CAST:%.*]] = bitcast %struct.STest* [[OUTDATA:%.*]] to i32*
2000 ; CHECK-NEXT: store i32 1123418112, i32* [[TMPDATA_SROA_0_0__SROA_CAST1_SROA_CAST]], align 4
2001 ; CHECK-NEXT: [[TMPDATA_SROA_0_0__SROA_CAST1_SROA_IDX:%.*]] = getelementptr inbounds [[STRUCT_STEST:%.*]], %struct.STest* [[OUTDATA]], i64 0, i32 0, i32 1
2002 ; CHECK-NEXT: [[TMPDATA_SROA_0_0__SROA_CAST1_SROA_CAST16:%.*]] = bitcast float* [[TMPDATA_SROA_0_0__SROA_CAST1_SROA_IDX]] to i32*
2003 ; CHECK-NEXT: store i32 1139015680, i32* [[TMPDATA_SROA_0_0__SROA_CAST1_SROA_CAST16]], align 4
2004 ; CHECK-NEXT: [[TMPDATA_SROA_6_0__SROA_IDX3:%.*]] = getelementptr inbounds [[STRUCT_STEST]], %struct.STest* [[OUTDATA]], i64 0, i32 1
2005 ; CHECK-NEXT: [[TMPDATA_SROA_6_0__SROA_CAST4:%.*]] = bitcast %struct.SPos* [[TMPDATA_SROA_6_0__SROA_IDX3]] to i64*
2006 ; CHECK-NEXT: [[TMPDATA_SROA_6_SROA_4_0_INSERT_EXT:%.*]] = zext i32 1139015680 to i64
2007 ; CHECK-NEXT: [[TMPDATA_SROA_6_SROA_4_0_INSERT_SHIFT:%.*]] = shl i64 [[TMPDATA_SROA_6_SROA_4_0_INSERT_EXT]], 32
2008 ; CHECK-NEXT: [[TMPDATA_SROA_6_SROA_4_0_INSERT_MASK:%.*]] = and i64 undef, 4294967295
2009 ; CHECK-NEXT: [[TMPDATA_SROA_6_SROA_4_0_INSERT_INSERT:%.*]] = or i64 [[TMPDATA_SROA_6_SROA_4_0_INSERT_MASK]], [[TMPDATA_SROA_6_SROA_4_0_INSERT_SHIFT]]
2010 ; CHECK-NEXT: [[TMPDATA_SROA_6_SROA_0_0_INSERT_EXT:%.*]] = zext i32 1123418112 to i64
2011 ; CHECK-NEXT: [[TMPDATA_SROA_6_SROA_0_0_INSERT_MASK:%.*]] = and i64 [[TMPDATA_SROA_6_SROA_4_0_INSERT_INSERT]], -4294967296
2012 ; CHECK-NEXT: [[TMPDATA_SROA_6_SROA_0_0_INSERT_INSERT:%.*]] = or i64 [[TMPDATA_SROA_6_SROA_0_0_INSERT_MASK]], [[TMPDATA_SROA_6_SROA_0_0_INSERT_EXT]]
2013 ; CHECK-NEXT: store i64 [[TMPDATA_SROA_6_SROA_0_0_INSERT_INSERT]], i64* [[TMPDATA_SROA_6_0__SROA_CAST4]], align 4
2014 ; CHECK-NEXT: ret void
2017 %tmpData = alloca %struct.STest, align 8
2018 %0 = bitcast %struct.STest* %tmpData to i8*
2019 call void @llvm.lifetime.start.p0i8(i64 16, i8* %0)
2020 %x = getelementptr inbounds %struct.STest, %struct.STest* %tmpData, i64 0, i32 0, i32 0
2021 store float 1.230000e+02, float* %x, align 8
2022 %y = getelementptr inbounds %struct.STest, %struct.STest* %tmpData, i64 0, i32 0, i32 1
2023 store float 4.560000e+02, float* %y, align 4
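; For reference (IEEE-754 bit patterns): float 123.0 is 0x42F60000 = 1123418112
; and float 456.0 is 0x43E40000 = 1139015680, the two i32 constants stored in
; the CHECK lines above.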
2024 %m_posB = getelementptr inbounds %struct.STest, %struct.STest* %tmpData, i64 0, i32 1
2025 %1 = bitcast %struct.STest* %tmpData to i64*
2026 %2 = bitcast %struct.SPos* %m_posB to i64*
2027 %3 = load i64, i64* %1, align 8
2028 store i64 %3, i64* %2, align 8
2029 %4 = bitcast %struct.STest* %outData to i8*
2030 call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 %0, i64 16, i1 false)
2031 call void @llvm.lifetime.end.p0i8(i64 16, i8* %0)
2035 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
2037 define void @PR27999() unnamed_addr {
2038 ; CHECK-LABEL: @PR27999(
2039 ; CHECK-NEXT: entry-block:
2040 ; CHECK-NEXT: ret void
2043 %0 = alloca [2 x i64], align 8
2044 %1 = bitcast [2 x i64]* %0 to i8*
2045 call void @llvm.lifetime.start.p0i8(i64 16, i8* %1)
2046 %2 = getelementptr inbounds [2 x i64], [2 x i64]* %0, i32 0, i32 1
2047 %3 = bitcast i64* %2 to i8*
2048 call void @llvm.lifetime.end.p0i8(i64 8, i8* %3)
2052 define void @PR29139() {
2053 ; CHECK-LABEL: @PR29139(
2055 ; CHECK-NEXT: ret void
2058 %e.7.sroa.6.i = alloca i32, align 1
2059 %e.7.sroa.6.0.load81.i = load i32, i32* %e.7.sroa.6.i, align 1
2060 %0 = bitcast i32* %e.7.sroa.6.i to i8*
2061 call void @llvm.lifetime.end.p0i8(i64 2, i8* %0)
2065 ; PR35657 reported an assertion failure on this code.
2066 define void @PR35657(i64 %v) {
2067 ; CHECK-LABEL: @PR35657(
2068 ; CHECK-NEXT: entry:
2069 ; CHECK-NEXT: [[A48_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i64 [[V:%.*]] to i16
2070 ; CHECK-NEXT: [[A48_SROA_2_0_EXTRACT_SHIFT:%.*]] = lshr i64 [[V]], 16
2071 ; CHECK-NEXT: [[A48_SROA_2_0_EXTRACT_TRUNC:%.*]] = trunc i64 [[A48_SROA_2_0_EXTRACT_SHIFT]] to i48
2072 ; CHECK-NEXT: call void @callee16(i16 [[A48_SROA_0_0_EXTRACT_TRUNC]])
2073 ; CHECK-NEXT: call void @callee48(i48 [[A48_SROA_2_0_EXTRACT_TRUNC]])
2074 ; CHECK-NEXT: ret void
2077 %a48 = alloca i48
2078 %a48.cast64 = bitcast i48* %a48 to i64*
2079 store i64 %v, i64* %a48.cast64
2080 %a48.cast16 = bitcast i48* %a48 to i16*
2081 %b0_15 = load i16, i16* %a48.cast16
2082 %a48.cast8 = bitcast i48* %a48 to i8*
2083 %a48_offset2 = getelementptr inbounds i8, i8* %a48.cast8, i64 2
2084 %a48_offset2.cast48 = bitcast i8* %a48_offset2 to i48*
2085 %b16_63 = load i48, i48* %a48_offset2.cast48, align 2
2086 call void @callee16(i16 %b0_15)
2087 call void @callee48(i48 %b16_63)
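; For reference: the promoted form splits %v directly, handing callee16 bits
; 0-15 via a trunc and callee48 bits 16-63 via lshr 16 plus trunc to i48, so no
; alloca or memory traffic survives.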
2091 declare void @callee16(i16 %a)
2092 declare void @callee48(i48 %a)
2094 define void @test28(i64 %v) #0 {
2095 ; SROA should split the first i64 store to avoid additional and/or instructions
2096 ; when storing into i32 fields
2097 ; CHECK-LABEL: @test28(
2098 ; CHECK-NEXT: entry:
2099 ; CHECK-NEXT: [[T_SROA_0_8_EXTRACT_TRUNC:%.*]] = trunc i64 [[V:%.*]] to i32
2100 ; CHECK-NEXT: [[T_SROA_2_8_EXTRACT_SHIFT:%.*]] = lshr i64 [[V]], 32
2101 ; CHECK-NEXT: [[T_SROA_2_8_EXTRACT_TRUNC:%.*]] = trunc i64 [[T_SROA_2_8_EXTRACT_SHIFT]] to i32
2102 ; CHECK-NEXT: ret void
2106 %t = alloca { i64, i32, i32 }
2108 %b = getelementptr { i64, i32, i32 }, { i64, i32, i32 }* %t, i32 0, i32 1
2109 %0 = bitcast i32* %b to i64*
2110 store i64 %v, i64* %0
2112 %1 = load i32, i32* %b
2113 %c = getelementptr { i64, i32, i32 }, { i64, i32, i32 }* %t, i32 0, i32 2
2114 store i32 %1, i32* %c
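; For reference: %b is at offset 8 of { i64, i32, i32 }, so the i64 store
; covers both i32 fields. SROA splits it into the two truncs above (the low and
; high words of %v); everything then promotes and the alloca vanishes.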
2118 declare void @llvm.lifetime.start.isVoid.i64.p0i8(i64, [10 x float]* nocapture)
2119 declare void @llvm.lifetime.end.isVoid.i64.p0i8(i64, [10 x float]* nocapture)
2120 @array = dso_local global [10 x float] undef, align 4
2122 define void @test29(i32 %num, i32 %tid) {
2123 ; CHECK-LABEL: @test29(
2124 ; CHECK-NEXT: entry:
2125 ; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[NUM:%.*]], 0
2126 ; CHECK-NEXT: br i1 [[CMP1]], label [[BB1:%.*]], label [[BB7:%.*]]
2128 ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TID:%.*]], 0
2129 ; CHECK-NEXT: [[CONV_I:%.*]] = zext i32 [[TID]] to i64
2130 ; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @array, i64 0, i64 [[CONV_I]]
2131 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[ARRAYIDX5]] to i32*
2132 ; CHECK-NEXT: br label [[BB2:%.*]]
2134 ; CHECK-NEXT: [[I_02:%.*]] = phi i32 [ [[NUM]], [[BB1]] ], [ [[SUB:%.*]], [[BB5:%.*]] ]
2135 ; CHECK-NEXT: br i1 [[TOBOOL]], label [[BB3:%.*]], label [[BB4:%.*]]
2137 ; CHECK-NEXT: br label [[BB5]]
2139 ; CHECK-NEXT: store i32 undef, i32* [[TMP0]], align 4
2140 ; CHECK-NEXT: br label [[BB5]]
2142 ; CHECK-NEXT: [[SUB]] = add i32 [[I_02]], -1
2143 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[SUB]], 0
2144 ; CHECK-NEXT: br i1 [[CMP]], label [[BB2]], label [[BB6:%.*]]
2146 ; CHECK-NEXT: br label [[BB7]]
2148 ; CHECK-NEXT: ret void
2152 %ra = alloca [10 x float], align 4
2153 call void @llvm.lifetime.start.isVoid.i64.p0i8(i64 40, [10 x float]* nonnull %ra)
2155 %cmp1 = icmp sgt i32 %num, 0
2156 br i1 %cmp1, label %bb1, label %bb7
2159 %tobool = icmp eq i32 %tid, 0
2160 %conv.i = zext i32 %tid to i64
2161 %0 = bitcast [10 x float]* %ra to i32*
2162 %1 = load i32, i32* %0, align 4
2163 %arrayidx5 = getelementptr inbounds [10 x float], [10 x float]* @array, i64 0, i64 %conv.i
2164 %2 = bitcast float* %arrayidx5 to i32*
2168 %i.02 = phi i32 [ %num, %bb1 ], [ %sub, %bb5 ]
2169 br i1 %tobool, label %bb3, label %bb4
2175 store i32 %1, i32* %2, align 4
2179 %sub = add i32 %i.02, -1
2180 %cmp = icmp sgt i32 %sub, 0
2181 br i1 %cmp, label %bb2, label %bb6
2187 call void @llvm.lifetime.end.isVoid.i64.p0i8(i64 40, [10 x float]* nonnull %ra)
2191 !0 = !{!1, !1, i64 0, i64 200}
2192 !1 = !{!2, i64 1, !"type_0"}
2193 !2 = !{!"root"}
2194 !3 = !{!4, !4, i64 0, i64 1}
2195 !4 = !{!2, i64 1, !"type_3"}
2196 !5 = !{!6, !6, i64 0, i64 1}
2197 !6 = !{!2, i64 1, !"type_5"}
2198 !7 = !{!8, !8, i64 0, i64 1}
2199 !8 = !{!2, i64 1, !"type_7"}
2200 !9 = !{!10, !10, i64 0, i64 1}
2201 !10 = !{!2, i64 1, !"type_9"}
2202 !11 = !{!12, !12, i64 0, i64 1}
2203 !12 = !{!2, i64 1, !"type_11"}
2204 !13 = !{!14, !14, i64 0, i64 1}
2205 !14 = !{!2, i64 1, !"type_13"}
2206 !15 = !{!16, !16, i64 0, i64 1}
2207 !16 = !{!2, i64 1, !"type_15"}
2208 !17 = !{!18, !18, i64 0, i64 1}
2209 !18 = !{!2, i64 1, !"type_17"}
2210 !19 = !{!20, !20, i64 0, i64 1}
2211 !20 = !{!2, i64 1, !"type_19"}
2212 !21 = !{!22, !22, i64 0, i64 1}
2213 !22 = !{!2, i64 1, !"type_21"}
2214 !23 = !{!24, !24, i64 0, i64 1}
2215 !24 = !{!2, i64 1, !"type_23"}
2216 !25 = !{!26, !26, i64 0, i64 1}
2217 !26 = !{!2, i64 1, !"type_25"}
2218 !27 = !{!28, !28, i64 0, i64 1}
2219 !28 = !{!2, i64 1, !"type_27"}
2220 !29 = !{!30, !30, i64 0, i64 1}
2221 !30 = !{!2, i64 1, !"type_29"}
2222 !31 = !{!32, !32, i64 0, i64 1}
2223 !32 = !{!2, i64 1, !"type_31"}
2224 !33 = !{!34, !34, i64 0, i64 1}
2225 !34 = !{!2, i64 1, !"type_33"}
2226 !35 = !{!36, !36, i64 0, i64 1}
2227 !36 = !{!2, i64 1, !"type_35"}
2228 !37 = !{!38, !38, i64 0, i64 1}
2229 !38 = !{!2, i64 1, !"type_37"}
2230 !39 = !{!40, !40, i64 0, i64 1}
2231 !40 = !{!2, i64 1, !"type_39"}
2232 !41 = !{!42, !42, i64 0, i64 1}
2233 !42 = !{!2, i64 1, !"type_41"}
2234 !43 = !{!44, !44, i64 0, i64 1}
2235 !44 = !{!2, i64 1, !"type_43"}
2236 !45 = !{!46, !46, i64 0, i64 1}
2237 !46 = !{!2, i64 1, !"type_45"}
2238 !47 = !{!48, !48, i64 0, i64 1}
2239 !48 = !{!2, i64 1, !"type_47"}
2240 !49 = !{!50, !50, i64 0, i64 1}
2241 !50 = !{!2, i64 1, !"type_49"}
2242 !51 = !{!52, !52, i64 0, i64 1}
2243 !52 = !{!2, i64 1, !"type_51"}
2244 !53 = !{!54, !54, i64 0, i64 1}
2245 !54 = !{!2, i64 1, !"type_53"}
2246 !55 = !{!56, !56, i64 0, i64 1}
2247 !56 = !{!2, i64 1, !"type_55"}
2248 !57 = !{!58, !58, i64 0, i64 1}
2249 !58 = !{!2, i64 1, !"type_57"}
2250 !59 = !{!60, !60, i64 0, i64 1}
2251 !60 = !{!2, i64 1, !"type_59"}