1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -basicaa -slp-vectorizer -slp-threshold=-100 -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
4 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
5 target triple = "i386-apple-macosx10.9.0"
7 ;int foo(double *A, int k) {
; Two scalar double phis (%A0.0/%A1.0) fed by a pair of adjacent loads in
; if.else and consumed by a pair of adjacent stores in if.end: the assertions
; below expect SLP to merge them into a single <2 x double> phi with a
; vector load and vector store.
; NOTE(review): this excerpt omits some original lines (entry label, ret,
; closing brace, CHECK-LABEL) — the embedded line numbers show the gaps.
22 define i32 @foo(double* nocapture %A, i32 %k) {
25 ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[K:%.*]], 0
26 ; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_ELSE:%.*]], label [[IF_END:%.*]]
28 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 10
29 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
30 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
31 ; CHECK-NEXT: br label [[IF_END]]
33 ; CHECK-NEXT: [[TMP2:%.*]] = phi <2 x double> [ [[TMP1]], [[IF_ELSE]] ], [ <double 3.000000e+00, double 5.000000e+00>, [[ENTRY:%.*]] ]
34 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast double* [[A]] to <2 x double>*
35 ; CHECK-NEXT: store <2 x double> [[TMP2]], <2 x double>* [[TMP3]], align 8
36 ; CHECK-NEXT: ret i32 undef
; Scalar input IR below; the assertions above are the expected vectorized output.
39 %tobool = icmp eq i32 %k, 0
40 br i1 %tobool, label %if.else, label %if.end
42 if.else: ; preds = %entry
43 %arrayidx = getelementptr inbounds double, double* %A, i64 10
44 %0 = load double, double* %arrayidx, align 8
45 %arrayidx1 = getelementptr inbounds double, double* %A, i64 11
46 %1 = load double, double* %arrayidx1, align 8
49 if.end: ; preds = %entry, %if.else
50 %A0.0 = phi double [ %0, %if.else ], [ 3.000000e+00, %entry ]
51 %A1.0 = phi double [ %1, %if.else ], [ 5.000000e+00, %entry ]
52 store double %A0.0, double* %A, align 8
53 %arrayidx3 = getelementptr inbounds double, double* %A, i64 1
54 store double %A1.0, double* %arrayidx3, align 8
59 ;int foo(double * restrict B, double * restrict A, int n, int m) {
62 ; for (int i=0; i < 100; i++) {
; Loop-carried case: the two accumulator phis (%G.018/%R.017) and the
; fadd/fmul/fadd chains feeding them are expected to vectorize into a single
; <2 x double> recurrence across the loop, with a vector store at for.end.
; NOTE(review): excerpt omits some original lines (entry label, ret, brace).
75 define i32 @foo2(double* noalias nocapture %B, double* noalias nocapture %A, i32 %n, i32 %m) #0 {
78 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
79 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
80 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
82 ; CHECK-NEXT: [[I_019:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
83 ; CHECK-NEXT: [[TMP2:%.*]] = phi <2 x double> [ [[TMP1]], [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ]
84 ; CHECK-NEXT: [[TMP3:%.*]] = fadd <2 x double> <double 1.000000e+01, double 1.000000e+01>, [[TMP2]]
85 ; CHECK-NEXT: [[TMP4:%.*]] = fmul <2 x double> <double 4.000000e+00, double 4.000000e+00>, [[TMP3]]
86 ; CHECK-NEXT: [[TMP5]] = fadd <2 x double> <double 4.000000e+00, double 4.000000e+00>, [[TMP4]]
87 ; CHECK-NEXT: [[INC]] = add nsw i32 [[I_019]], 1
88 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 100
89 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
91 ; CHECK-NEXT: [[TMP6:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
92 ; CHECK-NEXT: store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 8
93 ; CHECK-NEXT: ret i32 0
; Scalar input IR: two independent accumulators updated in lockstep each
; iteration, written to B[0]/B[1] after the loop.
96 %arrayidx = getelementptr inbounds double, double* %A, i64 1
97 %0 = load double, double* %arrayidx, align 8
98 %1 = load double, double* %A, align 8
101 for.body: ; preds = %for.body, %entry
102 %i.019 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
103 %G.018 = phi double [ %1, %entry ], [ %add5, %for.body ]
104 %R.017 = phi double [ %0, %entry ], [ %add4, %for.body ]
105 %add = fadd double %R.017, 1.000000e+01
106 %add2 = fadd double %G.018, 1.000000e+01
107 %mul = fmul double %add, 4.000000e+00
108 %mul3 = fmul double %add2, 4.000000e+00
109 %add4 = fadd double %mul, 4.000000e+00
110 %add5 = fadd double %mul3, 4.000000e+00
111 %inc = add nsw i32 %i.019, 1
112 %exitcond = icmp eq i32 %inc, 100
113 br i1 %exitcond, label %for.end, label %for.body
115 for.end: ; preds = %for.body
116 store double %add5, double* %B, align 8
117 %arrayidx7 = getelementptr inbounds double, double* %B, i64 1
118 store double %add4, double* %arrayidx7, align 8
122 ; float foo3(float *A) {
129 ; for (int i=0; i < 121; i+=3) {
; Harder recurrence: seven phis (an i64 induction variable, five float
; accumulators, and two rotated-load floats) with strided loads at i+2, i+3,
; i+4. The assertions below expect partial vectorization: a <4 x float>
; accumulator vector (note the reorder shuffles) plus a <2 x float> helper
; vector, with the remaining values kept scalar.
; NOTE(review): excerpt omits some original lines (entry label, ret, brace).
140 define float @foo3(float* nocapture readonly %A) #0 {
141 ; CHECK-LABEL: @foo3(
143 ; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[A:%.*]], align 4
144 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, float* [[A]], i64 1
145 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast float* [[ARRAYIDX1]] to <4 x float>*
146 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
147 ; CHECK-NEXT: [[REORDER_SHUFFLE:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
148 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[REORDER_SHUFFLE]], i32 3
149 ; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x float> undef, float [[TMP0]], i32 0
150 ; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x float> [[TMP4]], float [[TMP3]], i32 1
151 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
153 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
154 ; CHECK-NEXT: [[TMP6:%.*]] = phi float [ [[TMP0]], [[ENTRY]] ], [ [[TMP18:%.*]], [[FOR_BODY]] ]
155 ; CHECK-NEXT: [[TMP7:%.*]] = phi <4 x float> [ [[REORDER_SHUFFLE]], [[ENTRY]] ], [ [[TMP23:%.*]], [[FOR_BODY]] ]
156 ; CHECK-NEXT: [[TMP8:%.*]] = phi <2 x float> [ [[TMP5]], [[ENTRY]] ], [ [[TMP26:%.*]], [[FOR_BODY]] ]
157 ; CHECK-NEXT: [[MUL:%.*]] = fmul float [[TMP6]], 7.000000e+00
158 ; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x float> [[TMP8]], i32 0
159 ; CHECK-NEXT: [[ADD6:%.*]] = fadd float [[TMP9]], [[MUL]]
160 ; CHECK-NEXT: [[TMP10:%.*]] = add nsw i64 [[INDVARS_IV]], 2
161 ; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP10]]
162 ; CHECK-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX14]], align 4
163 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 3
164 ; CHECK-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDVARS_IV_NEXT]]
165 ; CHECK-NEXT: [[TMP12:%.*]] = bitcast float* [[ARRAYIDX19]] to <2 x float>*
166 ; CHECK-NEXT: [[TMP13:%.*]] = load <2 x float>, <2 x float>* [[TMP12]], align 4
167 ; CHECK-NEXT: [[REORDER_SHUFFLE1:%.*]] = shufflevector <2 x float> [[TMP13]], <2 x float> undef, <2 x i32> <i32 1, i32 0>
168 ; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x float> [[TMP8]], i32 1
169 ; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x float> <float 1.100000e+01, float 1.000000e+01, float 9.000000e+00, float undef>, float [[TMP14]], i32 3
170 ; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x float> [[REORDER_SHUFFLE1]], i32 0
171 ; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x float> undef, float [[TMP16]], i32 0
172 ; CHECK-NEXT: [[TMP18]] = extractelement <2 x float> [[REORDER_SHUFFLE1]], i32 1
173 ; CHECK-NEXT: [[TMP19:%.*]] = insertelement <4 x float> [[TMP17]], float [[TMP18]], i32 1
174 ; CHECK-NEXT: [[TMP20:%.*]] = insertelement <4 x float> [[TMP19]], float [[TMP11]], i32 2
175 ; CHECK-NEXT: [[TMP21:%.*]] = insertelement <4 x float> [[TMP20]], float 8.000000e+00, i32 3
176 ; CHECK-NEXT: [[TMP22:%.*]] = fmul <4 x float> [[TMP15]], [[TMP21]]
177 ; CHECK-NEXT: [[TMP23]] = fadd <4 x float> [[TMP7]], [[TMP22]]
178 ; CHECK-NEXT: [[TMP24:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
179 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP24]], 121
180 ; CHECK-NEXT: [[TMP25:%.*]] = insertelement <2 x float> undef, float [[ADD6]], i32 0
181 ; CHECK-NEXT: [[TMP26]] = insertelement <2 x float> [[TMP25]], float [[TMP16]], i32 1
182 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
184 ; CHECK-NEXT: [[TMP27:%.*]] = extractelement <4 x float> [[TMP23]], i32 3
185 ; CHECK-NEXT: [[ADD28:%.*]] = fadd float [[ADD6]], [[TMP27]]
186 ; CHECK-NEXT: [[TMP28:%.*]] = extractelement <4 x float> [[TMP23]], i32 2
187 ; CHECK-NEXT: [[ADD29:%.*]] = fadd float [[ADD28]], [[TMP28]]
188 ; CHECK-NEXT: [[TMP29:%.*]] = extractelement <4 x float> [[TMP23]], i32 1
189 ; CHECK-NEXT: [[ADD30:%.*]] = fadd float [[ADD29]], [[TMP29]]
190 ; CHECK-NEXT: [[TMP30:%.*]] = extractelement <4 x float> [[TMP23]], i32 0
191 ; CHECK-NEXT: [[ADD31:%.*]] = fadd float [[ADD30]], [[TMP30]]
192 ; CHECK-NEXT: ret float [[ADD31]]
; Scalar input IR: preheader loads A[0..4], then a loop stepping i by 3 that
; feeds five scalar accumulators (constants 7..11) and a final scalar
; reduction chain in for.end.
195 %0 = load float, float* %A, align 4
196 %arrayidx1 = getelementptr inbounds float, float* %A, i64 1
197 %1 = load float, float* %arrayidx1, align 4
198 %arrayidx2 = getelementptr inbounds float, float* %A, i64 2
199 %2 = load float, float* %arrayidx2, align 4
200 %arrayidx3 = getelementptr inbounds float, float* %A, i64 3
201 %3 = load float, float* %arrayidx3, align 4
202 %arrayidx4 = getelementptr inbounds float, float* %A, i64 4
203 %4 = load float, float* %arrayidx4, align 4
206 for.body: ; preds = %for.body, %entry
207 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
208 %P.056 = phi float [ %4, %entry ], [ %add26, %for.body ]
209 %Y.055 = phi float [ %3, %entry ], [ %add21, %for.body ]
210 %B.054 = phi float [ %2, %entry ], [ %add16, %for.body ]
211 %G.053 = phi float [ %1, %entry ], [ %add11, %for.body ]
212 %R.052 = phi float [ %0, %entry ], [ %add6, %for.body ]
213 %5 = phi float [ %1, %entry ], [ %11, %for.body ]
214 %6 = phi float [ %0, %entry ], [ %9, %for.body ]
215 %mul = fmul float %6, 7.000000e+00
216 %add6 = fadd float %R.052, %mul
217 %mul10 = fmul float %5, 8.000000e+00
218 %add11 = fadd float %G.053, %mul10
219 %7 = add nsw i64 %indvars.iv, 2
220 %arrayidx14 = getelementptr inbounds float, float* %A, i64 %7
221 %8 = load float, float* %arrayidx14, align 4
222 %mul15 = fmul float %8, 9.000000e+00
223 %add16 = fadd float %B.054, %mul15
224 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 3
225 %arrayidx19 = getelementptr inbounds float, float* %A, i64 %indvars.iv.next
226 %9 = load float, float* %arrayidx19, align 4
227 %mul20 = fmul float %9, 1.000000e+01
228 %add21 = fadd float %Y.055, %mul20
229 %10 = add nsw i64 %indvars.iv, 4
230 %arrayidx24 = getelementptr inbounds float, float* %A, i64 %10
231 %11 = load float, float* %arrayidx24, align 4
232 %mul25 = fmul float %11, 1.100000e+01
233 %add26 = fadd float %P.056, %mul25
234 %12 = trunc i64 %indvars.iv.next to i32
235 %cmp = icmp slt i32 %12, 121
236 br i1 %cmp, label %for.body, label %for.end
238 for.end: ; preds = %for.body
239 %add28 = fadd float %add6, %add11
240 %add29 = fadd float %add28, %add16
241 %add30 = fadd float %add29, %add21
242 %add31 = fadd float %add30, %add26
246 ; Make sure the order of phi nodes of different types does not prevent
247 ; vectorization of same typed phi nodes.
; The i64 induction phi is interleaved between the four float phis; the
; assertions below expect the four float phis to still vectorize into one
; <4 x float> phi (note the element permutation via extract/insert, matching
; the %R/%G cross-feeding in the scalar IR).
; NOTE(review): excerpt omits some original lines (entry label, ret, brace).
248 define float @sort_phi_type(float* nocapture readonly %A) {
249 ; CHECK-LABEL: @sort_phi_type(
251 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
253 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
254 ; CHECK-NEXT: [[TMP0:%.*]] = phi <4 x float> [ <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>, [[ENTRY]] ], [ [[TMP9:%.*]], [[FOR_BODY]] ]
255 ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[TMP0]], i32 0
256 ; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> undef, float [[TMP1]], i32 0
257 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
258 ; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> [[TMP2]], float [[TMP3]], i32 1
259 ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[TMP0]], i32 3
260 ; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x float> [[TMP4]], float [[TMP5]], i32 2
261 ; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x float> [[TMP0]], i32 2
262 ; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> [[TMP6]], float [[TMP7]], i32 3
263 ; CHECK-NEXT: [[TMP9]] = fmul <4 x float> <float 8.000000e+00, float 9.000000e+00, float 1.000000e+02, float 1.110000e+02>, [[TMP8]]
264 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], 4
265 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[INDVARS_IV_NEXT]], 128
266 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
268 ; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x float> [[TMP9]], i32 0
269 ; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x float> [[TMP9]], i32 1
270 ; CHECK-NEXT: [[ADD29:%.*]] = fadd float [[TMP10]], [[TMP11]]
271 ; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x float> [[TMP9]], i32 2
272 ; CHECK-NEXT: [[ADD30:%.*]] = fadd float [[ADD29]], [[TMP12]]
273 ; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x float> [[TMP9]], i32 3
274 ; CHECK-NEXT: [[ADD31:%.*]] = fadd float [[ADD30]], [[TMP13]]
275 ; CHECK-NEXT: ret float [[ADD31]]
; Scalar input IR: %R multiplies into %mul20 but feeds from %mul25 (and vice
; versa for %G), forcing the permutation seen in the expected output above.
280 for.body: ; preds = %for.body, %entry
281 %Y = phi float [ 1.000000e+01, %entry ], [ %mul10, %for.body ]
282 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
283 %B = phi float [ 1.000000e+01, %entry ], [ %mul15, %for.body ]
284 %G = phi float [ 1.000000e+01, %entry ], [ %mul20, %for.body ]
285 %R = phi float [ 1.000000e+01, %entry ], [ %mul25, %for.body ]
286 %mul10 = fmul float %Y, 8.000000e+00
287 %mul15 = fmul float %B, 9.000000e+00
288 %mul20 = fmul float %R, 10.000000e+01
289 %mul25 = fmul float %G, 11.100000e+01
290 %indvars.iv.next = add nsw i64 %indvars.iv, 4
291 %cmp = icmp slt i64 %indvars.iv.next, 128
292 br i1 %cmp, label %for.body, label %for.end
294 for.end: ; preds = %for.body
295 %add28 = fadd float 1.000000e+01, %mul10
296 %add29 = fadd float %mul10, %mul15
297 %add30 = fadd float %add29, %add30
298 %add31 = fadd float %add30, %mul25
; Negative test: the assertions below expect x86_fp80 loads/phis/stores to
; stay scalar (see the explanatory comment inside the function), because
; x86_fp80 vectorization is disabled.
; NOTE(review): this function runs past the visible end of the excerpt
; (closing brace / ret not shown); only the visible lines are annotated.
302 define void @test(x86_fp80* %i1, x86_fp80* %i2, x86_fp80* %o) {
303 ; CHECK-LABEL: @test(
305 ; CHECK-NEXT: [[I1_0:%.*]] = load x86_fp80, x86_fp80* [[I1:%.*]], align 16
306 ; CHECK-NEXT: [[I1_GEP1:%.*]] = getelementptr x86_fp80, x86_fp80* [[I1]], i64 1
307 ; CHECK-NEXT: [[I1_1:%.*]] = load x86_fp80, x86_fp80* [[I1_GEP1]], align 16
308 ; CHECK-NEXT: br i1 undef, label [[THEN:%.*]], label [[END:%.*]]
310 ; CHECK-NEXT: [[I2_GEP0:%.*]] = getelementptr inbounds x86_fp80, x86_fp80* [[I2:%.*]], i64 0
311 ; CHECK-NEXT: [[I2_0:%.*]] = load x86_fp80, x86_fp80* [[I2_GEP0]], align 16
312 ; CHECK-NEXT: [[I2_GEP1:%.*]] = getelementptr inbounds x86_fp80, x86_fp80* [[I2]], i64 1
313 ; CHECK-NEXT: [[I2_1:%.*]] = load x86_fp80, x86_fp80* [[I2_GEP1]], align 16
314 ; CHECK-NEXT: br label [[END]]
316 ; CHECK-NEXT: [[PHI0:%.*]] = phi x86_fp80 [ [[I1_0]], [[ENTRY:%.*]] ], [ [[I2_0]], [[THEN]] ]
317 ; CHECK-NEXT: [[PHI1:%.*]] = phi x86_fp80 [ [[I1_1]], [[ENTRY]] ], [ [[I2_1]], [[THEN]] ]
318 ; CHECK-NEXT: store x86_fp80 [[PHI0]], x86_fp80* [[O:%.*]], align 16
319 ; CHECK-NEXT: [[O_GEP1:%.*]] = getelementptr inbounds x86_fp80, x86_fp80* [[O]], i64 1
320 ; CHECK-NEXT: store x86_fp80 [[PHI1]], x86_fp80* [[O_GEP1]], align 16
321 ; CHECK-NEXT: ret void
323 ; Test that we correctly recognize the discontiguous memory in arrays where the
324 ; size is less than the alignment, and through various different GEP formations.
325 ; We disable the vectorization of x86_fp80 for now.
; Scalar input IR: x86_fp80 is 10 bytes stored with 16-byte alignment, so
; consecutive elements are not contiguous in memory.
328 %i1.0 = load x86_fp80, x86_fp80* %i1, align 16
329 %i1.gep1 = getelementptr x86_fp80, x86_fp80* %i1, i64 1
330 %i1.1 = load x86_fp80, x86_fp80* %i1.gep1, align 16
331 br i1 undef, label %then, label %end
334 %i2.gep0 = getelementptr inbounds x86_fp80, x86_fp80* %i2, i64 0
335 %i2.0 = load x86_fp80, x86_fp80* %i2.gep0, align 16
336 %i2.gep1 = getelementptr inbounds x86_fp80, x86_fp80* %i2, i64 1
337 %i2.1 = load x86_fp80, x86_fp80* %i2.gep1, align 16
341 %phi0 = phi x86_fp80 [ %i1.0, %entry ], [ %i2.0, %then ]
342 %phi1 = phi x86_fp80 [ %i1.1, %entry ], [ %i2.1, %then ]
343 store x86_fp80 %phi0, x86_fp80* %o, align 16
344 %o.gep1 = getelementptr inbounds x86_fp80, x86_fp80* %o, i64 1
345 store x86_fp80 %phi1, x86_fp80* %o.gep1, align 16