; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-apple-macosx10.10.0 -mattr=+sse4.2 | FileCheck %s
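
; The three functions below contain sum-of-products reductions whose inner
; loop has been unrolled by a factor of 8. The SLP vectorizer should rewrite
; each one into <8 x i32> loads, a single vector multiply, and a single
; @llvm.vector.reduce.add.v8i32 call.
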
;int test(unsigned int *p) {
;  int sum = 0;
;  for (int y = 0; y < 2; y++) {
;    // Inner loop gets unrolled
;    for (int x = 0; x < 8; x++) {
;      sum += p[x] * 42;
;    }
;    // Dummy call to keep outer loop alive
;    foo();
;  }
;  return sum;
;}
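
; The eight scalar load/mul/add chains in @test should collapse into one
; <8 x i32> load of p, a multiply by a splat of 42, and one
; @llvm.vector.reduce.add.v8i32 reduction, as the CHECK lines verify.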
define i32 @test(ptr nocapture readonly %p) {
; CHECK-LABEL: @test(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_RDX:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = mul <8 x i32> [[TMP1]], <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>
; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
; CHECK-NEXT:    [[OP_RDX]] = add i32 [[TMP3]], [[SUM]]
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret i32 [[OP_RDX]]
;
entry:
  %arrayidx.1 = getelementptr inbounds i32, ptr %p, i64 1
  %arrayidx.2 = getelementptr inbounds i32, ptr %p, i64 2
  %arrayidx.3 = getelementptr inbounds i32, ptr %p, i64 3
  %arrayidx.4 = getelementptr inbounds i32, ptr %p, i64 4
  %arrayidx.5 = getelementptr inbounds i32, ptr %p, i64 5
  %arrayidx.6 = getelementptr inbounds i32, ptr %p, i64 6
  %arrayidx.7 = getelementptr inbounds i32, ptr %p, i64 7
  br label %for.body

for.body:
  %sum = phi i32 [ 0, %entry ], [ %add.7, %for.body ]
  %tmp = load i32, ptr %p, align 4
  %mul = mul i32 %tmp, 42
  %add = add i32 %mul, %sum
  %tmp5 = load i32, ptr %arrayidx.1, align 4
  %mul.1 = mul i32 %tmp5, 42
  %add.1 = add i32 %mul.1, %add
  %tmp6 = load i32, ptr %arrayidx.2, align 4
  %mul.2 = mul i32 %tmp6, 42
  %add.2 = add i32 %mul.2, %add.1
  %tmp7 = load i32, ptr %arrayidx.3, align 4
  %mul.3 = mul i32 %tmp7, 42
  %add.3 = add i32 %mul.3, %add.2
  %tmp8 = load i32, ptr %arrayidx.4, align 4
  %mul.4 = mul i32 %tmp8, 42
  %add.4 = add i32 %mul.4, %add.3
  %tmp9 = load i32, ptr %arrayidx.5, align 4
  %mul.5 = mul i32 %tmp9, 42
  %add.5 = add i32 %mul.5, %add.4
  %tmp10 = load i32, ptr %arrayidx.6, align 4
  %mul.6 = mul i32 %tmp10, 42
  %add.6 = add i32 %mul.6, %add.5
  %tmp11 = load i32, ptr %arrayidx.7, align 4
  %mul.7 = mul i32 %tmp11, 42
  %add.7 = add i32 %mul.7, %add.6
  br i1 true, label %for.end, label %for.body

for.end:
  ret i32 %add.7
}

;int test2(unsigned int *p, unsigned int *q) {
;  int sum = 0;
;  for (int y = 0; y < 2; y++) {
;    // Inner loop gets unrolled
;    for (int x = 0; x < 8; x++) {
;      sum += p[x] * q[x];
;    }
;    // Dummy call to keep outer loop alive
;    foo();
;  }
;  return sum;
;}
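
; Same reduction, but with element-wise products of two arrays: both p and q
; should become single <8 x i32> loads feeding one vector multiply and one
; @llvm.vector.reduce.add.v8i32 call.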
define i32 @test2(ptr nocapture readonly %p, ptr nocapture readonly %q) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_RDX:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, ptr [[Q:%.*]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = mul <8 x i32> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP4]])
; CHECK-NEXT:    [[OP_RDX]] = add i32 [[TMP5]], [[SUM]]
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret i32 [[OP_RDX]]
;
entry:
  %arrayidx.p.1 = getelementptr inbounds i32, ptr %p, i64 1
  %arrayidx.p.2 = getelementptr inbounds i32, ptr %p, i64 2
  %arrayidx.p.3 = getelementptr inbounds i32, ptr %p, i64 3
  %arrayidx.p.4 = getelementptr inbounds i32, ptr %p, i64 4
  %arrayidx.p.5 = getelementptr inbounds i32, ptr %p, i64 5
  %arrayidx.p.6 = getelementptr inbounds i32, ptr %p, i64 6
  %arrayidx.p.7 = getelementptr inbounds i32, ptr %p, i64 7

  %arrayidx.q.1 = getelementptr inbounds i32, ptr %q, i64 1
  %arrayidx.q.2 = getelementptr inbounds i32, ptr %q, i64 2
  %arrayidx.q.3 = getelementptr inbounds i32, ptr %q, i64 3
  %arrayidx.q.4 = getelementptr inbounds i32, ptr %q, i64 4
  %arrayidx.q.5 = getelementptr inbounds i32, ptr %q, i64 5
  %arrayidx.q.6 = getelementptr inbounds i32, ptr %q, i64 6
  %arrayidx.q.7 = getelementptr inbounds i32, ptr %q, i64 7
  br label %for.body

for.body:
  %sum = phi i32 [ 0, %entry ], [ %add.7, %for.body ]
  %tmpp = load i32, ptr %p, align 4
  %tmpq = load i32, ptr %q, align 4
  %mul = mul i32 %tmpp, %tmpq
  %add = add i32 %mul, %sum
  %tmp5p = load i32, ptr %arrayidx.p.1, align 4
  %tmp5q = load i32, ptr %arrayidx.q.1, align 4
  %mul.1 = mul i32 %tmp5p, %tmp5q
  %add.1 = add i32 %mul.1, %add
  %tmp6p = load i32, ptr %arrayidx.p.2, align 4
  %tmp6q = load i32, ptr %arrayidx.q.2, align 4
  %mul.2 = mul i32 %tmp6p, %tmp6q
  %add.2 = add i32 %mul.2, %add.1
  %tmp7p = load i32, ptr %arrayidx.p.3, align 4
  %tmp7q = load i32, ptr %arrayidx.q.3, align 4
  %mul.3 = mul i32 %tmp7p, %tmp7q
  %add.3 = add i32 %mul.3, %add.2
  %tmp8p = load i32, ptr %arrayidx.p.4, align 4
  %tmp8q = load i32, ptr %arrayidx.q.4, align 4
  %mul.4 = mul i32 %tmp8p, %tmp8q
  %add.4 = add i32 %mul.4, %add.3
  %tmp9p = load i32, ptr %arrayidx.p.5, align 4
  %tmp9q = load i32, ptr %arrayidx.q.5, align 4
  %mul.5 = mul i32 %tmp9p, %tmp9q
  %add.5 = add i32 %mul.5, %add.4
  %tmp10p = load i32, ptr %arrayidx.p.6, align 4
  %tmp10q = load i32, ptr %arrayidx.q.6, align 4
  %mul.6 = mul i32 %tmp10p, %tmp10q
  %add.6 = add i32 %mul.6, %add.5
  %tmp11p = load i32, ptr %arrayidx.p.7, align 4
  %tmp11q = load i32, ptr %arrayidx.q.7, align 4
  %mul.7 = mul i32 %tmp11p, %tmp11q
  %add.7 = add i32 %mul.7, %add.6
  br i1 true, label %for.end, label %for.body

for.end:
  ret i32 %add.7
}

;int test3(unsigned int *p, unsigned int *q) {
;  int sum = 0;
;  for (int y = 0; y < 2; y++) {
;    // Inner loop gets unrolled
;    for (int x = 0; x < 8; x++) {
;      sum += p[x] * q[7-x];
;    }
;    // Dummy call to keep outer loop alive
;    foo();
;  }
;  return sum;
;}
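
; q is indexed in reverse here, so in addition to the two vector loads the
; vectorizer must emit a shufflevector that reverses the q vector before the
; multiply, as the CHECK lines below verify.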
define i32 @test3(ptr nocapture readonly %p, ptr nocapture readonly %q) {
; CHECK-LABEL: @test3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_RDX:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, ptr [[Q:%.*]], align 4
; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <8 x i32> [[TMP3]], <8 x i32> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT:    [[TMP4:%.*]] = mul <8 x i32> [[TMP1]], [[SHUFFLE]]
; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP4]])
; CHECK-NEXT:    [[OP_RDX]] = add i32 [[TMP5]], [[SUM]]
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret i32 [[OP_RDX]]
;
entry:
  %arrayidx.p.1 = getelementptr inbounds i32, ptr %p, i64 1
  %arrayidx.p.2 = getelementptr inbounds i32, ptr %p, i64 2
  %arrayidx.p.3 = getelementptr inbounds i32, ptr %p, i64 3
  %arrayidx.p.4 = getelementptr inbounds i32, ptr %p, i64 4
  %arrayidx.p.5 = getelementptr inbounds i32, ptr %p, i64 5
  %arrayidx.p.6 = getelementptr inbounds i32, ptr %p, i64 6
  %arrayidx.p.7 = getelementptr inbounds i32, ptr %p, i64 7

  %arrayidx.q.1 = getelementptr inbounds i32, ptr %q, i64 1
  %arrayidx.q.2 = getelementptr inbounds i32, ptr %q, i64 2
  %arrayidx.q.3 = getelementptr inbounds i32, ptr %q, i64 3
  %arrayidx.q.4 = getelementptr inbounds i32, ptr %q, i64 4
  %arrayidx.q.5 = getelementptr inbounds i32, ptr %q, i64 5
  %arrayidx.q.6 = getelementptr inbounds i32, ptr %q, i64 6
  %arrayidx.q.7 = getelementptr inbounds i32, ptr %q, i64 7
  br label %for.body

for.body:
  %sum = phi i32 [ 0, %entry ], [ %add.7, %for.body ]
  %tmpp = load i32, ptr %p, align 4
  %tmpq = load i32, ptr %arrayidx.q.7, align 4
  %mul = mul i32 %tmpp, %tmpq
  %add = add i32 %mul, %sum
  %tmp5p = load i32, ptr %arrayidx.p.1, align 4
  %tmp5q = load i32, ptr %arrayidx.q.6, align 4
  %mul.1 = mul i32 %tmp5p, %tmp5q
  %add.1 = add i32 %mul.1, %add
  %tmp6p = load i32, ptr %arrayidx.p.2, align 4
  %tmp6q = load i32, ptr %arrayidx.q.5, align 4
  %mul.2 = mul i32 %tmp6p, %tmp6q
  %add.2 = add i32 %mul.2, %add.1
  %tmp7p = load i32, ptr %arrayidx.p.3, align 4
  %tmp7q = load i32, ptr %arrayidx.q.4, align 4
  %mul.3 = mul i32 %tmp7p, %tmp7q
  %add.3 = add i32 %mul.3, %add.2
  %tmp8p = load i32, ptr %arrayidx.p.4, align 4
  %tmp8q = load i32, ptr %arrayidx.q.3, align 4
  %mul.4 = mul i32 %tmp8p, %tmp8q
  %add.4 = add i32 %mul.4, %add.3
  %tmp9p = load i32, ptr %arrayidx.p.5, align 4
  %tmp9q = load i32, ptr %arrayidx.q.2, align 4
  %mul.5 = mul i32 %tmp9p, %tmp9q
  %add.5 = add i32 %mul.5, %add.4
  %tmp10p = load i32, ptr %arrayidx.p.6, align 4
  %tmp10q = load i32, ptr %arrayidx.q.1, align 4
  %mul.6 = mul i32 %tmp10p, %tmp10q
  %add.6 = add i32 %mul.6, %add.5
  %tmp11p = load i32, ptr %arrayidx.p.7, align 4
  %tmp11q = load i32, ptr %q, align 4
  %mul.7 = mul i32 %tmp11p, %tmp11q
  %add.7 = add i32 %mul.7, %add.6
  br i1 true, label %for.end, label %for.body

for.end:
  ret i32 %add.7
}