; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.10.0 -mattr=+sse4.2 | FileCheck %s
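;
; The SLP vectorizer should turn the chain of eight scalar loads, multiplies
; by 42, and adds in the loop body into a single <8 x i32> load, a vector
; multiply, and a shuffle-based horizontal reduction, with the running sum
; %sum added back as a scalar after the reduction.
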
define i32 @test(i32* nocapture readonly %p) {
; CHECK-LABEL: @test(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 2
; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 3
; CHECK-NEXT:    [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 4
; CHECK-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 5
; CHECK-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 6
; CHECK-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[BIN_EXTRA:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[P]] to <8 x i32>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = mul <8 x i32> <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>, [[TMP1]]
; CHECK-NEXT:    [[ADD:%.*]] = add i32 undef, [[SUM]]
; CHECK-NEXT:    [[ADD_1:%.*]] = add i32 undef, [[ADD]]
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 undef, [[ADD_1]]
; CHECK-NEXT:    [[ADD_3:%.*]] = add i32 undef, [[ADD_2]]
; CHECK-NEXT:    [[ADD_4:%.*]] = add i32 undef, [[ADD_3]]
; CHECK-NEXT:    [[ADD_5:%.*]] = add i32 undef, [[ADD_4]]
; CHECK-NEXT:    [[ADD_6:%.*]] = add i32 undef, [[ADD_5]]
; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP2]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[BIN_RDX:%.*]] = add <8 x i32> [[TMP2]], [[RDX_SHUF]]
; CHECK-NEXT:    [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[BIN_RDX2:%.*]] = add <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
; CHECK-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[BIN_RDX4:%.*]] = add <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
; CHECK-NEXT:    [[BIN_EXTRA]] = add i32 [[TMP3]], [[SUM]]
; CHECK-NEXT:    [[ADD_7:%.*]] = add i32 undef, [[ADD_6]]
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret i32 [[BIN_EXTRA]]
;
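; The scalar input below is an unrolled reduction: eight loads from
; consecutive i32 elements of %p, each multiplied by 42 and added into the
; running sum %sum.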
entry:
  %arrayidx.1 = getelementptr inbounds i32, i32* %p, i64 1
  %arrayidx.2 = getelementptr inbounds i32, i32* %p, i64 2
  %arrayidx.3 = getelementptr inbounds i32, i32* %p, i64 3
  %arrayidx.4 = getelementptr inbounds i32, i32* %p, i64 4
  %arrayidx.5 = getelementptr inbounds i32, i32* %p, i64 5
  %arrayidx.6 = getelementptr inbounds i32, i32* %p, i64 6
  %arrayidx.7 = getelementptr inbounds i32, i32* %p, i64 7
  br label %for.body

for.body:
  %sum = phi i32 [ 0, %entry ], [ %add.7, %for.body ]
  %tmp = load i32, i32* %p, align 4
  %mul = mul i32 %tmp, 42
  %add = add i32 %mul, %sum
  %tmp5 = load i32, i32* %arrayidx.1, align 4
  %mul.1 = mul i32 %tmp5, 42
  %add.1 = add i32 %mul.1, %add
  %tmp6 = load i32, i32* %arrayidx.2, align 4
  %mul.2 = mul i32 %tmp6, 42
  %add.2 = add i32 %mul.2, %add.1
  %tmp7 = load i32, i32* %arrayidx.3, align 4
  %mul.3 = mul i32 %tmp7, 42
  %add.3 = add i32 %mul.3, %add.2
  %tmp8 = load i32, i32* %arrayidx.4, align 4
  %mul.4 = mul i32 %tmp8, 42
  %add.4 = add i32 %mul.4, %add.3
  %tmp9 = load i32, i32* %arrayidx.5, align 4
  %mul.5 = mul i32 %tmp9, 42
  %add.5 = add i32 %mul.5, %add.4
  %tmp10 = load i32, i32* %arrayidx.6, align 4
  %mul.6 = mul i32 %tmp10, 42
  %add.6 = add i32 %mul.6, %add.5
  %tmp11 = load i32, i32* %arrayidx.7, align 4
  %mul.7 = mul i32 %tmp11, 42
  %add.7 = add i32 %mul.7, %add.6
  br i1 true, label %for.end, label %for.body