; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
target triple = "i386-apple-macosx10.8.0"
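
; The tests below cover SLP vectorization of short store chains and reuse
; (CSE) of the gather sequences the vectorizer emits.

; The C equivalent of @test (derived from its IR) is roughly:
;   int test(double *G) {
;     G[0] = 1 + G[5]*4;
;     G[1] = 6 + G[6]*3;
;     G[2] = 7 + G[5]*4;
;     G[3] = 8 + G[6]*4;
;   }
; The stores to G[0] and G[1] are expected to become one <2 x double> store;
; G[2] and G[3] keep scalar math fed by extractelements of the vector values
; (see the CHECK lines).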
define i32 @test(double* nocapture %G) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[G:%.*]], i64 5
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[G]], i64 6
; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = fmul <2 x double> <double 4.000000e+00, double 3.000000e+00>, [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = fadd <2 x double> <double 1.000000e+00, double 6.000000e+00>, [[TMP2]]
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[G]], i64 1
; CHECK-NEXT: [[TMP4:%.*]] = bitcast double* [[G]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 8
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[TMP2]], i32 0
; CHECK-NEXT: [[ADD8:%.*]] = fadd double [[TMP5]], 7.000000e+00
; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[G]], i64 2
; CHECK-NEXT: store double [[ADD8]], double* [[ARRAYIDX9]], align 8
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[TMP1]], i32 1
; CHECK-NEXT: [[MUL11:%.*]] = fmul double [[TMP6]], 4.000000e+00
; CHECK-NEXT: [[ADD12:%.*]] = fadd double [[MUL11]], 8.000000e+00
; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds double, double* [[G]], i64 3
; CHECK-NEXT: store double [[ADD12]], double* [[ARRAYIDX13]], align 8
; CHECK-NEXT: ret i32 undef
;
entry:
  %arrayidx = getelementptr inbounds double, double* %G, i64 5
  %0 = load double, double* %arrayidx, align 8
  %mul = fmul double %0, 4.000000e+00
  %add = fadd double %mul, 1.000000e+00
  store double %add, double* %G, align 8
  %arrayidx2 = getelementptr inbounds double, double* %G, i64 6
  %1 = load double, double* %arrayidx2, align 8
  %mul3 = fmul double %1, 3.000000e+00
  %add4 = fadd double %mul3, 6.000000e+00
  %arrayidx5 = getelementptr inbounds double, double* %G, i64 1
  store double %add4, double* %arrayidx5, align 8
  %add8 = fadd double %mul, 7.000000e+00
  %arrayidx9 = getelementptr inbounds double, double* %G, i64 2
  store double %add8, double* %arrayidx9, align 8
  %mul11 = fmul double %1, 4.000000e+00
  %add12 = fadd double %mul11, 8.000000e+00
  %arrayidx13 = getelementptr inbounds double, double* %G, i64 3
  store double %add12, double* %arrayidx13, align 8
  ret i32 undef
}

;int foo(double *A, int n) {
; A[0] = A[0] * 7.9 * n + 6.0;
; A[1] = A[1] * 7.7 * n + 2.0;
; A[2] = A[2] * 7.6 * n + 3.0;
; A[3] = A[3] * 7.4 * n + 4.0;
;}
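; All four stores are expected to be vectorized: one <4 x double> load, an
; fmul by <7.9, 7.7, 7.6, 7.4>, a second fmul by a vector built from four
; insertelements of (double)n (the gather sequence), an fadd of
; <6.0, 2.0, 3.0, 4.0>, and a single <4 x double> store (see the CHECK lines).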
define i32 @foo(double* nocapture %A, i32 %n) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[N:%.*]] to double
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 1
; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[A]], i64 2
; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[A]], i64 3
; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[A]] to <4 x double>*
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x double>, <4 x double>* [[TMP0]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = fmul <4 x double> <double 7.900000e+00, double 7.700000e+00, double 7.600000e+00, double 7.400000e+00>, [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x double> undef, double [[CONV]], i32 0
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x double> [[TMP3]], double [[CONV]], i32 1
; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x double> [[TMP4]], double [[CONV]], i32 2
; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x double> [[TMP5]], double [[CONV]], i32 3
; CHECK-NEXT: [[TMP7:%.*]] = fmul <4 x double> [[TMP6]], [[TMP2]]
; CHECK-NEXT: [[TMP8:%.*]] = fadd <4 x double> <double 6.000000e+00, double 2.000000e+00, double 3.000000e+00, double 4.000000e+00>, [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = bitcast double* [[A]] to <4 x double>*
; CHECK-NEXT: store <4 x double> [[TMP8]], <4 x double>* [[TMP9]], align 8
; CHECK-NEXT: ret i32 undef
;
entry:
  %0 = load double, double* %A, align 8
  %mul = fmul double %0, 7.900000e+00
  %conv = sitofp i32 %n to double
  %mul1 = fmul double %conv, %mul
  %add = fadd double %mul1, 6.000000e+00
  store double %add, double* %A, align 8
  %arrayidx3 = getelementptr inbounds double, double* %A, i64 1
  %1 = load double, double* %arrayidx3, align 8
  %mul4 = fmul double %1, 7.700000e+00
  %mul6 = fmul double %conv, %mul4
  %add7 = fadd double %mul6, 2.000000e+00
  store double %add7, double* %arrayidx3, align 8
  %arrayidx9 = getelementptr inbounds double, double* %A, i64 2
  %2 = load double, double* %arrayidx9, align 8
  %mul10 = fmul double %2, 7.600000e+00
  %mul12 = fmul double %conv, %mul10
  %add13 = fadd double %mul12, 3.000000e+00
  store double %add13, double* %arrayidx9, align 8
  %arrayidx15 = getelementptr inbounds double, double* %A, i64 3
  %3 = load double, double* %arrayidx15, align 8
  %mul16 = fmul double %3, 7.400000e+00
  %mul18 = fmul double %conv, %mul16
  %add19 = fadd double %mul18, 4.000000e+00
  store double %add19, double* %arrayidx15, align 8
  ret i32 undef
}

; int test2(double *G, int k) {
;   if (k) {
;     G[0] = 1 + G[5]*4;
;     G[1] = 6 + G[6]*3;
;   } else {
;     G[2] = 7 + G[5]*4;
;     G[3] = 8 + G[6]*3;
;   }
; }
;
; We can't merge the gather sequences because one does not dominate the other.
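; A gather sequence is the insertelement chain that builds a vector from
; scalars, schematically (the names below are illustrative only):
;   %v0 = insertelement <2 x double> undef, double %a, i32 0
;   %v1 = insertelement <2 x double> %v0, double %b, i32 1
; Both successors of the entry block build such a chain here, and since
; neither block dominates the other the second chain cannot reuse the first,
; so the CHECK lines expect two independent insertelement chains.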
define i32 @test2(double* nocapture %G, i32 %k) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[K:%.*]], 0
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds double, double* [[G:%.*]], i64 5
; CHECK-NEXT: [[TMP3:%.*]] = load double, double* [[TMP2]], align 8
; CHECK-NEXT: [[TMP4:%.*]] = fmul double [[TMP3]], 4.000000e+00
; CHECK-NEXT: br i1 [[TMP1]], label [[TMP14:%.*]], label [[TMP5:%.*]]
; CHECK: [[TMP6:%.*]] = getelementptr inbounds double, double* [[G]], i64 6
; CHECK-NEXT: [[TMP7:%.*]] = load double, double* [[TMP6]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = fmul double [[TMP7]], 3.000000e+00
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x double> undef, double [[TMP4]], i32 0
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x double> [[TMP9]], double [[TMP8]], i32 1
; CHECK-NEXT: [[TMP11:%.*]] = fadd <2 x double> <double 1.000000e+00, double 6.000000e+00>, [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds double, double* [[G]], i64 1
; CHECK-NEXT: [[TMP13:%.*]] = bitcast double* [[G]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP11]], <2 x double>* [[TMP13]], align 8
; CHECK-NEXT: br label [[TMP24:%.*]]
; CHECK: [[TMP15:%.*]] = getelementptr inbounds double, double* [[G]], i64 2
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, double* [[G]], i64 6
; CHECK-NEXT: [[TMP17:%.*]] = load double, double* [[TMP16]], align 8
; CHECK-NEXT: [[TMP18:%.*]] = fmul double [[TMP17]], 3.000000e+00
; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x double> undef, double [[TMP4]], i32 0
; CHECK-NEXT: [[TMP20:%.*]] = insertelement <2 x double> [[TMP19]], double [[TMP18]], i32 1
; CHECK-NEXT: [[TMP21:%.*]] = fadd <2 x double> <double 7.000000e+00, double 8.000000e+00>, [[TMP20]]
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds double, double* [[G]], i64 3
; CHECK-NEXT: [[TMP23:%.*]] = bitcast double* [[TMP15]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP21]], <2 x double>* [[TMP23]], align 8
; CHECK-NEXT: br label [[TMP24]]
; CHECK: ret i32 undef
  %1 = icmp eq i32 %k, 0
  %2 = getelementptr inbounds double, double* %G, i64 5
  %3 = load double, double* %2, align 8
  %4 = fmul double %3, 4.000000e+00
  br i1 %1, label %12, label %5

; <label>:5 ; preds = %0
  %6 = fadd double %4, 1.000000e+00
  store double %6, double* %G, align 8
  %7 = getelementptr inbounds double, double* %G, i64 6
  %8 = load double, double* %7, align 8
  %9 = fmul double %8, 3.000000e+00
  %10 = fadd double %9, 6.000000e+00
  %11 = getelementptr inbounds double, double* %G, i64 1
  store double %10, double* %11, align 8
  br label %20

; <label>:12 ; preds = %0
  %13 = fadd double %4, 7.000000e+00
  %14 = getelementptr inbounds double, double* %G, i64 2
  store double %13, double* %14, align 8
  %15 = getelementptr inbounds double, double* %G, i64 6
  %16 = load double, double* %15, align 8
  %17 = fmul double %16, 3.000000e+00
  %18 = fadd double %17, 8.000000e+00
  %19 = getelementptr inbounds double, double* %G, i64 3
  store double %18, double* %19, align 8
  br label %20

; <label>:20 ; preds = %12, %5
  ret i32 undef
}

;int foo(double *A, int n) {
; A[0] = A[0] * 7.9 * n + 6.0;
; A[1] = A[1] * 7.9 * n + 6.0;
; A[2] = A[2] * 7.9 * n + 6.0;
; A[3] = A[3] * 7.9 * n + 6.0;
;}
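; Same shape as @foo above, but every lane uses the same constants (7.9 and
; 6.0), so the expected vector code differs from @foo only in its constant
; splats (see the CHECK lines).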
define i32 @foo4(double* nocapture %A, i32 %n) {
; CHECK-LABEL: @foo4(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[N:%.*]] to double
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 1
; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[A]], i64 2
; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, double* [[A]], i64 3
; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[A]] to <4 x double>*
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x double>, <4 x double>* [[TMP0]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = fmul <4 x double> <double 7.900000e+00, double 7.900000e+00, double 7.900000e+00, double 7.900000e+00>, [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x double> undef, double [[CONV]], i32 0
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x double> [[TMP3]], double [[CONV]], i32 1
; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x double> [[TMP4]], double [[CONV]], i32 2
; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x double> [[TMP5]], double [[CONV]], i32 3
; CHECK-NEXT: [[TMP7:%.*]] = fmul <4 x double> [[TMP6]], [[TMP2]]
; CHECK-NEXT: [[TMP8:%.*]] = fadd <4 x double> <double 6.000000e+00, double 6.000000e+00, double 6.000000e+00, double 6.000000e+00>, [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = bitcast double* [[A]] to <4 x double>*
; CHECK-NEXT: store <4 x double> [[TMP8]], <4 x double>* [[TMP9]], align 8
; CHECK-NEXT: ret i32 undef
;
entry:
  %0 = load double, double* %A, align 8
  %mul = fmul double %0, 7.900000e+00
  %conv = sitofp i32 %n to double
  %mul1 = fmul double %conv, %mul
  %add = fadd double %mul1, 6.000000e+00
  store double %add, double* %A, align 8
  %arrayidx3 = getelementptr inbounds double, double* %A, i64 1
  %1 = load double, double* %arrayidx3, align 8
  %mul4 = fmul double %1, 7.900000e+00
  %mul6 = fmul double %conv, %mul4
  %add7 = fadd double %mul6, 6.000000e+00
  store double %add7, double* %arrayidx3, align 8
  %arrayidx9 = getelementptr inbounds double, double* %A, i64 2
  %2 = load double, double* %arrayidx9, align 8
  %mul10 = fmul double %2, 7.900000e+00
  %mul12 = fmul double %conv, %mul10
  %add13 = fadd double %mul12, 6.000000e+00
  store double %add13, double* %arrayidx9, align 8
  %arrayidx15 = getelementptr inbounds double, double* %A, i64 3
  %3 = load double, double* %arrayidx15, align 8
  %mul16 = fmul double %3, 7.900000e+00
  %mul18 = fmul double %conv, %mul16
  %add19 = fadd double %mul18, 6.000000e+00
  store double %add19, double* %arrayidx15, align 8
  ret i32 undef
}

;int partial_mrg(double *A, int n) {
; A[0] = A[0] * n;
; A[1] = A[1] * n;
; if (n < 4) return 0;
; A[2] = A[2] * n;
; A[3] = A[3] * (n+4);
;}
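; Here the gather sequences can be partially merged: the insertelement of
; (double)n into lane 0 happens in the entry block and dominates if.end, so
; the gather in if.end reuses it ([[TMP2]]) and only inserts (double)(n + 4)
; into lane 1 (see [[TMP8]] in the CHECK lines).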
define i32 @partial_mrg(double* nocapture %A, i32 %n) {
; CHECK-LABEL: @partial_mrg(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[N:%.*]] to double
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 1
; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[A]] to <2 x double>*
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x double> undef, double [[CONV]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x double> [[TMP2]], double [[CONV]], i32 1
; CHECK-NEXT: [[TMP4:%.*]] = fmul <2 x double> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = bitcast double* [[A]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[N]], 4
; CHECK-NEXT: br i1 [[CMP]], label [[RETURN:%.*]], label [[IF_END:%.*]]
; CHECK: if.end:
; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[A]], i64 2
; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[A]], i64 3
; CHECK-NEXT: [[TMP6:%.*]] = bitcast double* [[ARRAYIDX7]] to <2 x double>*
; CHECK-NEXT: [[TMP7:%.*]] = load <2 x double>, <2 x double>* [[TMP6]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[N]], 4
; CHECK-NEXT: [[CONV12:%.*]] = sitofp i32 [[ADD]] to double
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[TMP2]], double [[CONV12]], i32 1
; CHECK-NEXT: [[TMP9:%.*]] = fmul <2 x double> [[TMP8]], [[TMP7]]
; CHECK-NEXT: [[TMP10:%.*]] = bitcast double* [[ARRAYIDX7]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP9]], <2 x double>* [[TMP10]], align 8
; CHECK-NEXT: br label [[RETURN]]
; CHECK: return:
; CHECK-NEXT: ret i32 0
;
entry:
  %0 = load double, double* %A, align 8
  %conv = sitofp i32 %n to double
  %mul = fmul double %conv, %0
  store double %mul, double* %A, align 8
  %arrayidx2 = getelementptr inbounds double, double* %A, i64 1
  %1 = load double, double* %arrayidx2, align 8
  %mul4 = fmul double %conv, %1
  store double %mul4, double* %arrayidx2, align 8
  %cmp = icmp slt i32 %n, 4
  br i1 %cmp, label %return, label %if.end

if.end: ; preds = %entry
  %arrayidx7 = getelementptr inbounds double, double* %A, i64 2
  %2 = load double, double* %arrayidx7, align 8
  %mul9 = fmul double %conv, %2
  store double %mul9, double* %arrayidx7, align 8
  %arrayidx11 = getelementptr inbounds double, double* %A, i64 3
  %3 = load double, double* %arrayidx11, align 8
  %add = add nsw i32 %n, 4
  %conv12 = sitofp i32 %add to double
  %mul13 = fmul double %conv12, %3
  store double %mul13, double* %arrayidx11, align 8
  br label %return

return: ; preds = %entry, %if.end
  ret i32 0
}

%class.B.53.55 = type { %class.A.52.54, double }
%class.A.52.54 = type { double, double, double }

@a = external global double, align 8
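
; Reduced test case for PR19646. No vectorization is expected: the CHECK
; lines pin the current scalar output; the interesting parts are the undef
; operands and the predecessor-less %sw.epilog7 block.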
define void @PR19646(%class.B.53.55* %this) {
; CHECK-LABEL: @PR19646(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 undef, label [[IF_END13:%.*]], label [[IF_END13]]
; CHECK: sw.epilog7:
; CHECK-NEXT: [[DOTIN:%.*]] = getelementptr inbounds [[CLASS_B_53_55:%.*]], %class.B.53.55* [[THIS:%.*]], i64 0, i32 0, i32 1
; CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[DOTIN]], align 8
; CHECK-NEXT: [[ADD:%.*]] = fadd double undef, 0.000000e+00
; CHECK-NEXT: [[ADD6:%.*]] = fadd double [[ADD]], [[TMP0]]
; CHECK-NEXT: [[TMP1:%.*]] = load double, double* @a, align 8
; CHECK-NEXT: [[ADD8:%.*]] = fadd double [[TMP1]], 0.000000e+00
; CHECK-NEXT: [[_DY:%.*]] = getelementptr inbounds [[CLASS_B_53_55]], %class.B.53.55* [[THIS]], i64 0, i32 0, i32 2
; CHECK-NEXT: [[TMP2:%.*]] = load double, double* [[_DY]], align 8
; CHECK-NEXT: [[ADD10:%.*]] = fadd double [[ADD8]], [[TMP2]]
; CHECK-NEXT: br i1 undef, label [[IF_THEN12:%.*]], label [[IF_END13]]
; CHECK: if.then12:
; CHECK-NEXT: [[TMP3:%.*]] = load double, double* undef, align 8
; CHECK-NEXT: br label [[IF_END13]]
; CHECK: if.end13:
; CHECK-NEXT: [[X_1:%.*]] = phi double [ 0.000000e+00, [[IF_THEN12]] ], [ [[ADD6]], [[SW_EPILOG7:%.*]] ], [ undef, [[ENTRY:%.*]] ], [ undef, [[ENTRY]] ]
; CHECK-NEXT: [[B_0:%.*]] = phi double [ [[TMP3]], [[IF_THEN12]] ], [ [[ADD10]], [[SW_EPILOG7]] ], [ undef, [[ENTRY]] ], [ undef, [[ENTRY]] ]
; CHECK-NEXT: unreachable
;
entry:
  br i1 undef, label %if.end13, label %if.end13

sw.epilog7: ; No predecessors!
  %.in = getelementptr inbounds %class.B.53.55, %class.B.53.55* %this, i64 0, i32 0, i32 1
  %0 = load double, double* %.in, align 8
  %add = fadd double undef, 0.000000e+00
  %add6 = fadd double %add, %0
  %1 = load double, double* @a, align 8
  %add8 = fadd double %1, 0.000000e+00
  %_dy = getelementptr inbounds %class.B.53.55, %class.B.53.55* %this, i64 0, i32 0, i32 2
  %2 = load double, double* %_dy, align 8
  %add10 = fadd double %add8, %2
  br i1 undef, label %if.then12, label %if.end13

if.then12: ; preds = %sw.epilog7
  %3 = load double, double* undef, align 8
  br label %if.end13

if.end13: ; preds = %if.then12, %sw.epilog7, %entry
  %x.1 = phi double [ 0.000000e+00, %if.then12 ], [ %add6, %sw.epilog7 ], [ undef, %entry ], [ undef, %entry ]
  %b.0 = phi double [ %3, %if.then12 ], [ %add10, %sw.epilog7 ], [ undef, %entry ], [ undef, %entry ]
  unreachable
}