1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt -passes=slp-vectorizer -mtriple=arm64-apple-ios -S %s | FileCheck %s
; vec3 i32 multiply-by-constant: the SLP vectorizer is expected to vectorize
; lanes 0-1 into a single <2 x i32> load/mul/store and keep lane 2 scalar
; (see the assertions below).
4 define void @v3_load_i32_mul_by_constant_store(ptr %src, ptr %dst) {
5 ; CHECK-LABEL: @v3_load_i32_mul_by_constant_store(
7 ; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i32 0
8 ; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 2
9 ; CHECK-NEXT: [[L_SRC_2:%.*]] = load i32, ptr [[GEP_SRC_2]], align 4
10 ; CHECK-NEXT: [[MUL_2:%.*]] = mul nsw i32 [[L_SRC_2]], 10
11 ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr [[GEP_SRC_0]], align 4
12 ; CHECK-NEXT: [[TMP1:%.*]] = mul nsw <2 x i32> [[TMP0]], <i32 10, i32 10>
13 ; CHECK-NEXT: store <2 x i32> [[TMP1]], ptr [[DST:%.*]], align 4
14 ; CHECK-NEXT: [[DST_2:%.*]] = getelementptr i32, ptr [[DST]], i32 2
15 ; CHECK-NEXT: store i32 [[MUL_2]], ptr [[DST_2]], align 4
16 ; CHECK-NEXT: ret void
; Scalar input: three adjacent i32 loads from %src, each multiplied by 10,
; stored to three adjacent slots at %dst.
19 %gep.src.0 = getelementptr inbounds i32, ptr %src, i32 0
20 %l.src.0 = load i32, ptr %gep.src.0, align 4
21 %mul.0 = mul nsw i32 %l.src.0, 10
23 %gep.src.1 = getelementptr inbounds i32, ptr %src, i32 1
24 %l.src.1 = load i32, ptr %gep.src.1, align 4
25 %mul.1 = mul nsw i32 %l.src.1, 10
27 %gep.src.2 = getelementptr inbounds i32, ptr %src, i32 2
28 %l.src.2 = load i32, ptr %gep.src.2, align 4
29 %mul.2 = mul nsw i32 %l.src.2, 10
31 store i32 %mul.0, ptr %dst
33 %dst.1 = getelementptr i32, ptr %dst, i32 1
34 store i32 %mul.1, ptr %dst.1
36 %dst.2 = getelementptr i32, ptr %dst, i32 2
37 store i32 %mul.2, ptr %dst.2
; vec3 i32 multiply of two loaded operand streams: lanes 0-1 are expected to
; become two <2 x i32> loads, one <2 x i32> mul and one vector store, while
; lane 2 stays scalar.
42 define void @v3_load_i32_mul_store(ptr %src.1, ptr %src.2, ptr %dst) {
43 ; CHECK-LABEL: @v3_load_i32_mul_store(
45 ; CHECK-NEXT: [[GEP_SRC_1_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_1:%.*]], i32 0
46 ; CHECK-NEXT: [[GEP_SRC_2_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_2:%.*]], i32 0
47 ; CHECK-NEXT: [[GEP_SRC_1_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_1]], i32 2
48 ; CHECK-NEXT: [[L_SRC_1_2:%.*]] = load i32, ptr [[GEP_SRC_1_2]], align 4
49 ; CHECK-NEXT: [[GEP_SRC_2_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_2]], i32 2
50 ; CHECK-NEXT: [[L_SRC_2_2:%.*]] = load i32, ptr [[GEP_SRC_2_2]], align 4
51 ; CHECK-NEXT: [[MUL_2:%.*]] = mul nsw i32 [[L_SRC_1_2]], [[L_SRC_2_2]]
52 ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr [[GEP_SRC_1_0]], align 4
53 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr [[GEP_SRC_2_0]], align 4
54 ; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <2 x i32> [[TMP0]], [[TMP1]]
55 ; CHECK-NEXT: store <2 x i32> [[TMP2]], ptr [[DST:%.*]], align 4
56 ; CHECK-NEXT: [[DST_2:%.*]] = getelementptr i32, ptr [[DST]], i32 2
57 ; CHECK-NEXT: store i32 [[MUL_2]], ptr [[DST_2]], align 4
58 ; CHECK-NEXT: ret void
; Scalar input: per lane i, dst[i] = src.1[i] * src.2[i] for i in 0..2.
61 %gep.src.1.0 = getelementptr inbounds i32, ptr %src.1, i32 0
62 %l.src.1.0 = load i32, ptr %gep.src.1.0, align 4
63 %gep.src.2.0 = getelementptr inbounds i32, ptr %src.2, i32 0
64 %l.src.2.0 = load i32, ptr %gep.src.2.0, align 4
65 %mul.0 = mul nsw i32 %l.src.1.0, %l.src.2.0
67 %gep.src.1.1 = getelementptr inbounds i32, ptr %src.1, i32 1
68 %l.src.1.1 = load i32, ptr %gep.src.1.1, align 4
69 %gep.src.2.1 = getelementptr inbounds i32, ptr %src.2, i32 1
70 %l.src.2.1 = load i32, ptr %gep.src.2.1, align 4
71 %mul.1 = mul nsw i32 %l.src.1.1, %l.src.2.1
73 %gep.src.1.2 = getelementptr inbounds i32, ptr %src.1, i32 2
74 %l.src.1.2 = load i32, ptr %gep.src.1.2, align 4
75 %gep.src.2.2 = getelementptr inbounds i32, ptr %src.2, i32 2
76 %l.src.2.2 = load i32, ptr %gep.src.2.2, align 4
77 %mul.2 = mul nsw i32 %l.src.1.2, %l.src.2.2
79 store i32 %mul.0, ptr %dst
81 %dst.1 = getelementptr i32, ptr %dst, i32 1
82 store i32 %mul.1, ptr %dst.1
84 %dst.2 = getelementptr i32, ptr %dst, i32 2
85 store i32 %mul.2, ptr %dst.2
; vec3 i32 mul followed by add-of-constant: the two-instruction chain
; (mul nsw, add 9) is expected to vectorize for lanes 0-1 as <2 x i32> ops,
; with lane 2 kept scalar.
90 define void @v3_load_i32_mul_add_const_store(ptr %src.1, ptr %src.2, ptr %dst) {
91 ; CHECK-LABEL: @v3_load_i32_mul_add_const_store(
93 ; CHECK-NEXT: [[GEP_SRC_1_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_1:%.*]], i32 0
94 ; CHECK-NEXT: [[GEP_SRC_2_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_2:%.*]], i32 0
95 ; CHECK-NEXT: [[GEP_SRC_1_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_1]], i32 2
96 ; CHECK-NEXT: [[L_SRC_1_2:%.*]] = load i32, ptr [[GEP_SRC_1_2]], align 4
97 ; CHECK-NEXT: [[GEP_SRC_2_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_2]], i32 2
98 ; CHECK-NEXT: [[L_SRC_2_2:%.*]] = load i32, ptr [[GEP_SRC_2_2]], align 4
99 ; CHECK-NEXT: [[MUL_2:%.*]] = mul nsw i32 [[L_SRC_1_2]], [[L_SRC_2_2]]
100 ; CHECK-NEXT: [[ADD_2:%.*]] = add i32 [[MUL_2]], 9
101 ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr [[GEP_SRC_1_0]], align 4
102 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr [[GEP_SRC_2_0]], align 4
103 ; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <2 x i32> [[TMP0]], [[TMP1]]
104 ; CHECK-NEXT: [[TMP3:%.*]] = add <2 x i32> [[TMP2]], <i32 9, i32 9>
105 ; CHECK-NEXT: store <2 x i32> [[TMP3]], ptr [[DST:%.*]], align 4
106 ; CHECK-NEXT: [[DST_2:%.*]] = getelementptr i32, ptr [[DST]], i32 2
107 ; CHECK-NEXT: store i32 [[ADD_2]], ptr [[DST_2]], align 4
108 ; CHECK-NEXT: ret void
; Scalar input: per lane i, dst[i] = src.1[i] * src.2[i] + 9 for i in 0..2.
111 %gep.src.1.0 = getelementptr inbounds i32, ptr %src.1, i32 0
112 %l.src.1.0 = load i32, ptr %gep.src.1.0, align 4
113 %gep.src.2.0 = getelementptr inbounds i32, ptr %src.2, i32 0
114 %l.src.2.0 = load i32, ptr %gep.src.2.0, align 4
115 %mul.0 = mul nsw i32 %l.src.1.0, %l.src.2.0
116 %add.0 = add i32 %mul.0, 9
118 %gep.src.1.1 = getelementptr inbounds i32, ptr %src.1, i32 1
119 %l.src.1.1 = load i32, ptr %gep.src.1.1, align 4
120 %gep.src.2.1 = getelementptr inbounds i32, ptr %src.2, i32 1
121 %l.src.2.1 = load i32, ptr %gep.src.2.1, align 4
122 %mul.1 = mul nsw i32 %l.src.1.1, %l.src.2.1
123 %add.1 = add i32 %mul.1, 9
125 %gep.src.1.2 = getelementptr inbounds i32, ptr %src.1, i32 2
126 %l.src.1.2 = load i32, ptr %gep.src.1.2, align 4
127 %gep.src.2.2 = getelementptr inbounds i32, ptr %src.2, i32 2
128 %l.src.2.2 = load i32, ptr %gep.src.2.2, align 4
129 %mul.2 = mul nsw i32 %l.src.1.2, %l.src.2.2
130 %add.2 = add i32 %mul.2, 9
132 store i32 %add.0, ptr %dst
134 %dst.1 = getelementptr i32, ptr %dst, i32 1
135 store i32 %add.1, ptr %dst.1
137 %dst.2 = getelementptr i32, ptr %dst, i32 2
138 store i32 %add.2, ptr %dst.2
; Floating-point variant of the vec3 pattern: three f32 fadd-by-constant
; lanes, with lanes 0-1 expected to vectorize to a <2 x float> fadd and
; lane 2 kept scalar.
143 define void @v3_load_f32_fadd_fadd_by_constant_store(ptr %src, ptr %dst) {
144 ; CHECK-LABEL: @v3_load_f32_fadd_fadd_by_constant_store(
146 ; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i32 0
147 ; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr inbounds float, ptr [[SRC]], i32 2
148 ; CHECK-NEXT: [[L_SRC_2:%.*]] = load float, ptr [[GEP_SRC_2]], align 4
149 ; CHECK-NEXT: [[FADD_2:%.*]] = fadd float [[L_SRC_2]], 1.000000e+01
150 ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x float>, ptr [[GEP_SRC_0]], align 4
151 ; CHECK-NEXT: [[TMP1:%.*]] = fadd <2 x float> [[TMP0]], <float 1.000000e+01, float 1.000000e+01>
152 ; CHECK-NEXT: store <2 x float> [[TMP1]], ptr [[DST:%.*]], align 4
153 ; CHECK-NEXT: [[DST_2:%.*]] = getelementptr float, ptr [[DST]], i32 2
154 ; CHECK-NEXT: store float [[FADD_2]], ptr [[DST_2]], align 4
155 ; CHECK-NEXT: ret void
; Scalar input: per lane i, dst[i] = src[i] + 10.0 for i in 0..2.
158 %gep.src.0 = getelementptr inbounds float, ptr %src, i32 0
159 %l.src.0 = load float , ptr %gep.src.0, align 4
160 %fadd.0 = fadd float %l.src.0, 10.0
162 %gep.src.1 = getelementptr inbounds float , ptr %src, i32 1
163 %l.src.1 = load float, ptr %gep.src.1, align 4
164 %fadd.1 = fadd float %l.src.1, 10.0
166 %gep.src.2 = getelementptr inbounds float, ptr %src, i32 2
167 %l.src.2 = load float, ptr %gep.src.2, align 4
168 %fadd.2 = fadd float %l.src.2, 10.0
170 store float %fadd.0, ptr %dst
172 %dst.1 = getelementptr float, ptr %dst, i32 1
173 store float %fadd.1, ptr %dst.1
175 %dst.2 = getelementptr float, ptr %dst, i32 2
176 store float %fadd.2, ptr %dst.2
; Three scalar phis feeding three adjacent stores: lanes 0-1 are expected to
; merge into a single <2 x i32> phi (poison on the unreachable edge) while
; lane 2 remains a scalar phi.
; NOTE(review): the entry/exit block labels referenced by the phis sit on
; lines not visible in this chunk.
181 define void @phi_store3(ptr %dst) {
182 ; CHECK-LABEL: @phi_store3(
184 ; CHECK-NEXT: br label [[EXIT:%.*]]
185 ; CHECK: invoke.cont8.loopexit:
186 ; CHECK-NEXT: br label [[EXIT]]
188 ; CHECK-NEXT: [[P_2:%.*]] = phi i32 [ 3, [[ENTRY:%.*]] ], [ 0, [[INVOKE_CONT8_LOOPEXIT:%.*]] ]
189 ; CHECK-NEXT: [[TMP0:%.*]] = phi <2 x i32> [ <i32 1, i32 2>, [[ENTRY]] ], [ poison, [[INVOKE_CONT8_LOOPEXIT]] ]
190 ; CHECK-NEXT: [[DST_2:%.*]] = getelementptr i32, ptr [[DST:%.*]], i32 2
191 ; CHECK-NEXT: store <2 x i32> [[TMP0]], ptr [[DST]], align 4
192 ; CHECK-NEXT: store i32 [[P_2]], ptr [[DST_2]], align 4
193 ; CHECK-NEXT: ret void
198 invoke.cont8.loopexit: ; No predecessors!
202 %p.0 = phi i32 [ 1, %entry ], [ 0, %invoke.cont8.loopexit ]
203 %p.1 = phi i32 [ 2, %entry ], [ 0, %invoke.cont8.loopexit ]
204 %p.2 = phi i32 [ 3, %entry ], [ 0, %invoke.cont8.loopexit ]
206 %dst.1 = getelementptr i32, ptr %dst, i32 1
207 %dst.2 = getelementptr i32, ptr %dst, i32 2
209 store i32 %p.0, ptr %dst, align 4
210 store i32 %p.1, ptr %dst.1, align 4
211 store i32 %p.2, ptr %dst.2, align 4
; Three stores of constant-folded zeros: lane 0 is expected to stay a scalar
; store while lanes 1-2 collapse into one <2 x i32> zeroinitializer store.
; NOTE(review): %add used by the first store is defined on a line not visible
; in this chunk (the assertions below show it as `add i32 0, 0`).
215 define void @store_try_reorder(ptr %dst) {
216 ; CHECK-LABEL: @store_try_reorder(
218 ; CHECK-NEXT: [[ADD:%.*]] = add i32 0, 0
219 ; CHECK-NEXT: store i32 [[ADD]], ptr [[DST:%.*]], align 4
220 ; CHECK-NEXT: [[ARRAYIDX_I1887:%.*]] = getelementptr i32, ptr [[DST]], i64 1
221 ; CHECK-NEXT: store <2 x i32> zeroinitializer, ptr [[ARRAYIDX_I1887]], align 4
222 ; CHECK-NEXT: ret void
226 store i32 %add, ptr %dst, align 4
227 %add207 = sub i32 0, 0
228 %arrayidx.i1887 = getelementptr i32, ptr %dst, i64 1
229 store i32 %add207, ptr %arrayidx.i1887, align 4
230 %add216 = sub i32 0, 0
231 %arrayidx.i1891 = getelementptr i32, ptr %dst, i64 2
232 store i32 %add216, ptr %arrayidx.i1891, align 4
; vec3 chain of fpext -> llvm.fmuladd.f64 -> fptrunc: lanes 0-1 are expected
; to vectorize into <2 x double>/<2 x float> operations (splatting %0 via an
; insertelement/shufflevector pair), with lane 2 kept scalar. Exercises the
; cost modeling of the fpext in the vectorized chain.
236 define void @vec3_fpext_cost(ptr %Colour, float %0) {
237 ; CHECK-LABEL: @vec3_fpext_cost(
239 ; CHECK-NEXT: [[ARRAYIDX80:%.*]] = getelementptr float, ptr [[COLOUR:%.*]], i64 2
240 ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x float> poison, float [[TMP0:%.*]], i32 0
241 ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <2 x i32> zeroinitializer
242 ; CHECK-NEXT: [[TMP3:%.*]] = fpext <2 x float> [[TMP2]] to <2 x double>
243 ; CHECK-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP3]], <2 x double> zeroinitializer, <2 x double> zeroinitializer)
244 ; CHECK-NEXT: [[TMP5:%.*]] = fptrunc <2 x double> [[TMP4]] to <2 x float>
245 ; CHECK-NEXT: store <2 x float> [[TMP5]], ptr [[COLOUR]], align 4
246 ; CHECK-NEXT: [[CONV78:%.*]] = fpext float [[TMP0]] to double
247 ; CHECK-NEXT: [[TMP6:%.*]] = call double @llvm.fmuladd.f64(double [[CONV78]], double 0.000000e+00, double 0.000000e+00)
248 ; CHECK-NEXT: [[CONV82:%.*]] = fptrunc double [[TMP6]] to float
249 ; CHECK-NEXT: store float [[CONV82]], ptr [[ARRAYIDX80]], align 4
250 ; CHECK-NEXT: ret void
; Scalar input: each of the three lanes extends %0 to double, applies
; fmuladd with zero operands, truncates back, and stores to Colour[0..2].
253 %arrayidx72 = getelementptr float, ptr %Colour, i64 1
254 %arrayidx80 = getelementptr float, ptr %Colour, i64 2
255 %conv62 = fpext float %0 to double
256 %1 = call double @llvm.fmuladd.f64(double %conv62, double 0.000000e+00, double 0.000000e+00)
257 %conv66 = fptrunc double %1 to float
258 store float %conv66, ptr %Colour, align 4
259 %conv70 = fpext float %0 to double
260 %2 = call double @llvm.fmuladd.f64(double %conv70, double 0.000000e+00, double 0.000000e+00)
261 %conv74 = fptrunc double %2 to float
262 store float %conv74, ptr %arrayidx72, align 4
263 %conv78 = fpext float %0 to double
264 %3 = call double @llvm.fmuladd.f64(double %conv78, double 0.000000e+00, double 0.000000e+00)
265 %conv82 = fptrunc double %3 to float
266 store float %conv82, ptr %arrayidx80, align 4
; Three fptrunc-of-%conv stores where the first truncated value is reused for
; lanes 0 and 1: lanes 1-2 are expected to vectorize from a splatted
; <2 x float> fptrunc, while lane 0 reuses an extracted element of that
; vector.
270 define void @fpext_gather(ptr %dst, double %conv) {
271 ; CHECK-LABEL: @fpext_gather(
273 ; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> poison, double [[CONV:%.*]], i32 0
274 ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> poison, <2 x i32> zeroinitializer
275 ; CHECK-NEXT: [[TMP2:%.*]] = fptrunc <2 x double> [[TMP1]] to <2 x float>
276 ; CHECK-NEXT: [[LENGTHS:%.*]] = getelementptr float, ptr [[DST:%.*]], i64 0
277 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x float> [[TMP2]], i32 0
278 ; CHECK-NEXT: store float [[TMP3]], ptr [[LENGTHS]], align 4
279 ; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr float, ptr [[DST]], i64 1
280 ; CHECK-NEXT: store <2 x float> [[TMP2]], ptr [[ARRAYIDX32]], align 4
281 ; CHECK-NEXT: ret void
; Scalar input: %conv25 is stored to dst[0] and dst[1]; a second, identical
; truncation (%conv34) is stored to dst[2].
284 %conv25 = fptrunc double %conv to float
285 %Lengths = getelementptr float, ptr %dst, i64 0
286 store float %conv25, ptr %Lengths, align 4
287 %arrayidx32 = getelementptr float, ptr %dst, i64 1
288 store float %conv25, ptr %arrayidx32, align 4
289 %conv34 = fptrunc double %conv to float
290 %arrayidx37 = getelementptr float, ptr %dst, i64 2
291 store float %conv34, ptr %arrayidx37, align 4
295 declare float @llvm.fmuladd.f32(float, float, float)
297 declare double @llvm.fmuladd.f64(double, double, double)