; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
target triple = "i386-apple-macosx10.8.0"
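
; A C-level sketch of what @test below computes, reconstructed from its IR
; body (approximate; shown for readability, in the same style as the other
; tests in this file):
;
;int test(double *G) {
;  G[0] = G[5]*4 + 1;
;  G[1] = G[6]*3 + 6;
;  G[2] = G[5]*4 + 7;
;  G[3] = G[6]*4 + 8;
;}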

define i32 @test(double* nocapture %G) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[G:%.*]], i64 5
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[G]], i64 6
; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = fmul <2 x double> [[TMP1]], <double 4.000000e+00, double 3.000000e+00>
; CHECK-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP2]], <double 1.000000e+00, double 6.000000e+00>
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[G]], i64 1
; CHECK-NEXT: [[TMP4:%.*]] = bitcast double* [[G]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 8
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[TMP2]], i32 0
; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[G]], i64 2
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[TMP1]], i32 1
; CHECK-NEXT: [[MUL11:%.*]] = fmul double [[TMP6]], 4.000000e+00
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> undef, double [[TMP5]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[TMP7]], double [[MUL11]], i32 1
; CHECK-NEXT: [[TMP9:%.*]] = fadd <2 x double> [[TMP8]], <double 7.000000e+00, double 8.000000e+00>
; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds double, double* [[G]], i64 3
; CHECK-NEXT: [[TMP10:%.*]] = bitcast double* [[ARRAYIDX9]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP9]], <2 x double>* [[TMP10]], align 8
; CHECK-NEXT: ret i32 undef
entry:
  %arrayidx = getelementptr inbounds double, double* %G, i64 5
  %0 = load double, double* %arrayidx, align 8
  %mul = fmul double %0, 4.000000e+00
  %add = fadd double %mul, 1.000000e+00
  store double %add, double* %G, align 8
  %arrayidx2 = getelementptr inbounds double, double* %G, i64 6
  %1 = load double, double* %arrayidx2, align 8
  %mul3 = fmul double %1, 3.000000e+00
  %add4 = fadd double %mul3, 6.000000e+00
  %arrayidx5 = getelementptr inbounds double, double* %G, i64 1
  store double %add4, double* %arrayidx5, align 8
  %add8 = fadd double %mul, 7.000000e+00
  %arrayidx9 = getelementptr inbounds double, double* %G, i64 2
  store double %add8, double* %arrayidx9, align 8
  %mul11 = fmul double %1, 4.000000e+00
  %add12 = fadd double %mul11, 8.000000e+00
  %arrayidx13 = getelementptr inbounds double, double* %G, i64 3
  store double %add12, double* %arrayidx13, align 8
  ret i32 undef
}

;int foo(double *A, int n) {
; A[0] = A[0] * 7.9 * n + 6.0;
; A[1] = A[1] * 7.7 * n + 2.0;
; A[2] = A[2] * 7.6 * n + 3.0;
; A[3] = A[3] * 7.4 * n + 4.0;
;}

define i32 @foo(double* nocapture %A, i32 %n) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[N:%.*]] to double
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 1
; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[A]], i64 2
; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[A]], i64 3
; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[A]] to <4 x double>*
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x double>, <4 x double>* [[TMP0]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = fmul <4 x double> [[TMP1]], <double 7.900000e+00, double 7.700000e+00, double 7.600000e+00, double 7.400000e+00>
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x double> undef, double [[CONV]], i32 0
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x double> [[TMP3]], double [[CONV]], i32 1
; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x double> [[TMP4]], double [[CONV]], i32 2
; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x double> [[TMP5]], double [[CONV]], i32 3
; CHECK-NEXT: [[TMP7:%.*]] = fmul <4 x double> [[TMP6]], [[TMP2]]
; CHECK-NEXT: [[TMP8:%.*]] = fadd <4 x double> [[TMP7]], <double 6.000000e+00, double 2.000000e+00, double 3.000000e+00, double 4.000000e+00>
; CHECK-NEXT: [[TMP9:%.*]] = bitcast double* [[A]] to <4 x double>*
; CHECK-NEXT: store <4 x double> [[TMP8]], <4 x double>* [[TMP9]], align 8
; CHECK-NEXT: ret i32 undef
entry:
  %0 = load double, double* %A, align 8
  %mul = fmul double %0, 7.900000e+00
  %conv = sitofp i32 %n to double
  %mul1 = fmul double %conv, %mul
  %add = fadd double %mul1, 6.000000e+00
  store double %add, double* %A, align 8
  %arrayidx3 = getelementptr inbounds double, double* %A, i64 1
  %1 = load double, double* %arrayidx3, align 8
  %mul4 = fmul double %1, 7.700000e+00
  %mul6 = fmul double %conv, %mul4
  %add7 = fadd double %mul6, 2.000000e+00
  store double %add7, double* %arrayidx3, align 8
  %arrayidx9 = getelementptr inbounds double, double* %A, i64 2
  %2 = load double, double* %arrayidx9, align 8
  %mul10 = fmul double %2, 7.600000e+00
  %mul12 = fmul double %conv, %mul10
  %add13 = fadd double %mul12, 3.000000e+00
  store double %add13, double* %arrayidx9, align 8
  %arrayidx15 = getelementptr inbounds double, double* %A, i64 3
  %3 = load double, double* %arrayidx15, align 8
  %mul16 = fmul double %3, 7.400000e+00
  %mul18 = fmul double %conv, %mul16
  %add19 = fadd double %mul18, 4.000000e+00
  store double %add19, double* %arrayidx15, align 8
  ret i32 undef
}

; int test2(double *G, int k) {
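;   /* body reconstructed from the IR below; an approximation, not the
;      original source text */
;   if (k) {
;     G[0] = G[5]*4 + 1;
;     G[1] = G[6]*3 + 6;
;   } else {
;     G[2] = G[5]*4 + 7;
;     G[3] = G[6]*3 + 8;
;   }
; }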
; We can't merge the gather sequences because one does not dominate the other.

define i32 @test2(double* nocapture %G, i32 %k) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[K:%.*]], 0
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds double, double* [[G:%.*]], i64 5
; CHECK-NEXT: [[TMP3:%.*]] = load double, double* [[TMP2]], align 8
; CHECK-NEXT: [[TMP4:%.*]] = fmul double [[TMP3]], 4.000000e+00
; CHECK-NEXT: br i1 [[TMP1]], label [[TMP14:%.*]], label [[TMP5:%.*]]
; CHECK: [[TMP6:%.*]] = getelementptr inbounds double, double* [[G]], i64 6
; CHECK-NEXT: [[TMP7:%.*]] = load double, double* [[TMP6]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = fmul double [[TMP7]], 3.000000e+00
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x double> undef, double [[TMP4]], i32 0
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x double> [[TMP9]], double [[TMP8]], i32 1
; CHECK-NEXT: [[TMP11:%.*]] = fadd <2 x double> [[TMP10]], <double 1.000000e+00, double 6.000000e+00>
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds double, double* [[G]], i64 1
; CHECK-NEXT: [[TMP13:%.*]] = bitcast double* [[G]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP11]], <2 x double>* [[TMP13]], align 8
; CHECK-NEXT: br label [[TMP24:%.*]]
; CHECK: [[TMP15:%.*]] = getelementptr inbounds double, double* [[G]], i64 2
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, double* [[G]], i64 6
; CHECK-NEXT: [[TMP17:%.*]] = load double, double* [[TMP16]], align 8
; CHECK-NEXT: [[TMP18:%.*]] = fmul double [[TMP17]], 3.000000e+00
; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x double> undef, double [[TMP4]], i32 0
; CHECK-NEXT: [[TMP20:%.*]] = insertelement <2 x double> [[TMP19]], double [[TMP18]], i32 1
; CHECK-NEXT: [[TMP21:%.*]] = fadd <2 x double> [[TMP20]], <double 7.000000e+00, double 8.000000e+00>
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds double, double* [[G]], i64 3
; CHECK-NEXT: [[TMP23:%.*]] = bitcast double* [[TMP15]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP21]], <2 x double>* [[TMP23]], align 8
; CHECK-NEXT: br label [[TMP24]]
; CHECK: ret i32 undef
  %1 = icmp eq i32 %k, 0
  %2 = getelementptr inbounds double, double* %G, i64 5
  %3 = load double, double* %2, align 8
  %4 = fmul double %3, 4.000000e+00
  br i1 %1, label %12, label %5

; <label>:5                                       ; preds = %0
  %6 = fadd double %4, 1.000000e+00
  store double %6, double* %G, align 8
  %7 = getelementptr inbounds double, double* %G, i64 6
  %8 = load double, double* %7, align 8
  %9 = fmul double %8, 3.000000e+00
  %10 = fadd double %9, 6.000000e+00
  %11 = getelementptr inbounds double, double* %G, i64 1
  store double %10, double* %11, align 8
  br label %20

; <label>:12                                      ; preds = %0
  %13 = fadd double %4, 7.000000e+00
  %14 = getelementptr inbounds double, double* %G, i64 2
  store double %13, double* %14, align 8
  %15 = getelementptr inbounds double, double* %G, i64 6
  %16 = load double, double* %15, align 8
  %17 = fmul double %16, 3.000000e+00
  %18 = fadd double %17, 8.000000e+00
  %19 = getelementptr inbounds double, double* %G, i64 3
  store double %18, double* %19, align 8
  br label %20

; <label>:20                                      ; preds = %12, %5
  ret i32 undef
}

;int foo4(double *A, int n) {
; A[0] = A[0] * 7.9 * n + 6.0;
; A[1] = A[1] * 7.9 * n + 6.0;
; A[2] = A[2] * 7.9 * n + 6.0;
; A[3] = A[3] * 7.9 * n + 6.0;
;}

define i32 @foo4(double* nocapture %A, i32 %n) {
; CHECK-LABEL: @foo4(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[N:%.*]] to double
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 1
; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[A]], i64 2
; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[A]], i64 3
; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[A]] to <4 x double>*
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x double>, <4 x double>* [[TMP0]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = fmul <4 x double> [[TMP1]], <double 7.900000e+00, double 7.900000e+00, double 7.900000e+00, double 7.900000e+00>
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x double> undef, double [[CONV]], i32 0
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x double> [[TMP3]], double [[CONV]], i32 1
; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x double> [[TMP4]], double [[CONV]], i32 2
; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x double> [[TMP5]], double [[CONV]], i32 3
; CHECK-NEXT: [[TMP7:%.*]] = fmul <4 x double> [[TMP6]], [[TMP2]]
; CHECK-NEXT: [[TMP8:%.*]] = fadd <4 x double> [[TMP7]], <double 6.000000e+00, double 6.000000e+00, double 6.000000e+00, double 6.000000e+00>
; CHECK-NEXT: [[TMP9:%.*]] = bitcast double* [[A]] to <4 x double>*
; CHECK-NEXT: store <4 x double> [[TMP8]], <4 x double>* [[TMP9]], align 8
; CHECK-NEXT: ret i32 undef
entry:
  %0 = load double, double* %A, align 8
  %mul = fmul double %0, 7.900000e+00
  %conv = sitofp i32 %n to double
  %mul1 = fmul double %conv, %mul
  %add = fadd double %mul1, 6.000000e+00
  store double %add, double* %A, align 8
  %arrayidx3 = getelementptr inbounds double, double* %A, i64 1
  %1 = load double, double* %arrayidx3, align 8
  %mul4 = fmul double %1, 7.900000e+00
  %mul6 = fmul double %conv, %mul4
  %add7 = fadd double %mul6, 6.000000e+00
  store double %add7, double* %arrayidx3, align 8
  %arrayidx9 = getelementptr inbounds double, double* %A, i64 2
  %2 = load double, double* %arrayidx9, align 8
  %mul10 = fmul double %2, 7.900000e+00
  %mul12 = fmul double %conv, %mul10
  %add13 = fadd double %mul12, 6.000000e+00
  store double %add13, double* %arrayidx9, align 8
  %arrayidx15 = getelementptr inbounds double, double* %A, i64 3
  %3 = load double, double* %arrayidx15, align 8
  %mul16 = fmul double %3, 7.900000e+00
  %mul18 = fmul double %conv, %mul16
  %add19 = fadd double %mul18, 6.000000e+00
  store double %add19, double* %arrayidx15, align 8
  ret i32 undef
}

;int partial_mrg(double *A, int n) {
; A[0] = A[0] * n;
; A[1] = A[1] * n;
; if (n < 4) return 0;
; A[2] = A[2] * n;
; A[3] = A[3] * (n+4);
;}

define i32 @partial_mrg(double* nocapture %A, i32 %n) {
; CHECK-LABEL: @partial_mrg(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[N:%.*]] to double
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 1
; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[A]] to <2 x double>*
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x double> undef, double [[CONV]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x double> [[TMP2]], double [[CONV]], i32 1
; CHECK-NEXT: [[TMP4:%.*]] = fmul <2 x double> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = bitcast double* [[A]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[N]], 4
; CHECK-NEXT: br i1 [[CMP]], label [[RETURN:%.*]], label [[IF_END:%.*]]
; CHECK: if.end:
; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[A]], i64 2
; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[A]], i64 3
; CHECK-NEXT: [[TMP6:%.*]] = bitcast double* [[ARRAYIDX7]] to <2 x double>*
; CHECK-NEXT: [[TMP7:%.*]] = load <2 x double>, <2 x double>* [[TMP6]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[N]], 4
; CHECK-NEXT: [[CONV12:%.*]] = sitofp i32 [[ADD]] to double
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[TMP2]], double [[CONV12]], i32 1
; CHECK-NEXT: [[TMP9:%.*]] = fmul <2 x double> [[TMP8]], [[TMP7]]
; CHECK-NEXT: [[TMP10:%.*]] = bitcast double* [[ARRAYIDX7]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP9]], <2 x double>* [[TMP10]], align 8
; CHECK-NEXT: br label [[RETURN]]
; CHECK: return:
; CHECK-NEXT: ret i32 0
entry:
  %0 = load double, double* %A, align 8
  %conv = sitofp i32 %n to double
  %mul = fmul double %conv, %0
  store double %mul, double* %A, align 8
  %arrayidx2 = getelementptr inbounds double, double* %A, i64 1
  %1 = load double, double* %arrayidx2, align 8
  %mul4 = fmul double %conv, %1
  store double %mul4, double* %arrayidx2, align 8
  %cmp = icmp slt i32 %n, 4
  br i1 %cmp, label %return, label %if.end

if.end:                                           ; preds = %entry
  %arrayidx7 = getelementptr inbounds double, double* %A, i64 2
  %2 = load double, double* %arrayidx7, align 8
  %mul9 = fmul double %conv, %2
  store double %mul9, double* %arrayidx7, align 8
  %arrayidx11 = getelementptr inbounds double, double* %A, i64 3
  %3 = load double, double* %arrayidx11, align 8
  %add = add nsw i32 %n, 4
  %conv12 = sitofp i32 %add to double
  %mul13 = fmul double %conv12, %3
  store double %mul13, double* %arrayidx11, align 8
  br label %return

return:                                           ; preds = %entry, %if.end
  ret i32 0
}

%class.B.53.55 = type { %class.A.52.54, double }
%class.A.52.54 = type { double, double, double }

@a = external global double, align 8

define void @PR19646(%class.B.53.55* %this) {
; CHECK-LABEL: @PR19646(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 undef, label [[IF_END13:%.*]], label [[IF_END13]]
; CHECK: sw.epilog7:
; CHECK-NEXT: [[DOTIN:%.*]] = getelementptr inbounds [[CLASS_B_53_55:%.*]], %class.B.53.55* [[THIS:%.*]], i64 0, i32 0, i32 1
; CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[DOTIN]], align 8
; CHECK-NEXT: [[ADD:%.*]] = fadd double undef, 0.000000e+00
; CHECK-NEXT: [[ADD6:%.*]] = fadd double [[ADD]], [[TMP0]]
; CHECK-NEXT: [[TMP1:%.*]] = load double, double* @a, align 8
; CHECK-NEXT: [[ADD8:%.*]] = fadd double [[TMP1]], 0.000000e+00
; CHECK-NEXT: [[_DY:%.*]] = getelementptr inbounds [[CLASS_B_53_55]], %class.B.53.55* [[THIS]], i64 0, i32 0, i32 2
; CHECK-NEXT: [[TMP2:%.*]] = load double, double* [[_DY]], align 8
; CHECK-NEXT: [[ADD10:%.*]] = fadd double [[ADD8]], [[TMP2]]
; CHECK-NEXT: br i1 undef, label [[IF_THEN12:%.*]], label [[IF_END13]]
; CHECK: if.then12:
; CHECK-NEXT: [[TMP3:%.*]] = load double, double* undef, align 8
; CHECK-NEXT: br label [[IF_END13]]
; CHECK: if.end13:
; CHECK-NEXT: [[X_1:%.*]] = phi double [ 0.000000e+00, [[IF_THEN12]] ], [ [[ADD6]], [[SW_EPILOG7:%.*]] ], [ undef, [[ENTRY:%.*]] ], [ undef, [[ENTRY]] ]
; CHECK-NEXT: [[B_0:%.*]] = phi double [ [[TMP3]], [[IF_THEN12]] ], [ [[ADD10]], [[SW_EPILOG7]] ], [ undef, [[ENTRY]] ], [ undef, [[ENTRY]] ]
; CHECK-NEXT: unreachable
entry:
  br i1 undef, label %if.end13, label %if.end13

sw.epilog7:                                       ; No predecessors!
  %.in = getelementptr inbounds %class.B.53.55, %class.B.53.55* %this, i64 0, i32 0, i32 1
  %0 = load double, double* %.in, align 8
  %add = fadd double undef, 0.000000e+00
  %add6 = fadd double %add, %0
  %1 = load double, double* @a, align 8
  %add8 = fadd double %1, 0.000000e+00
  %_dy = getelementptr inbounds %class.B.53.55, %class.B.53.55* %this, i64 0, i32 0, i32 2
  %2 = load double, double* %_dy, align 8
  %add10 = fadd double %add8, %2
  br i1 undef, label %if.then12, label %if.end13

if.then12:                                        ; preds = %sw.epilog7
  %3 = load double, double* undef, align 8
  br label %if.end13

if.end13:                                         ; preds = %if.then12, %sw.epilog7, %entry
  %x.1 = phi double [ 0.000000e+00, %if.then12 ], [ %add6, %sw.epilog7 ], [ undef, %entry ], [ undef, %entry ]
  %b.0 = phi double [ %3, %if.then12 ], [ %add10, %sw.epilog7 ], [ undef, %entry ], [ undef, %entry ]
  unreachable
}