; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=x86_64-pc_linux -passes=loop-vectorize,instcombine < %s | FileCheck %s --check-prefix=SSE
; RUN: opt -S -mtriple=x86_64-pc_linux -passes=loop-vectorize,instcombine -mcpu=sandybridge < %s | FileCheck %s --check-prefix=AVX1
; RUN: opt -S -mtriple=x86_64-pc_linux -passes=loop-vectorize,instcombine -mcpu=haswell < %s | FileCheck %s --check-prefix=AVX2
; RUN: opt -S -mtriple=x86_64-pc_linux -passes=loop-vectorize,instcombine -mcpu=slm < %s | FileCheck %s --check-prefix=SSE
; RUN: opt -S -mtriple=x86_64-pc_linux -passes=loop-vectorize,instcombine -mcpu=atom < %s | FileCheck %s --check-prefix=ATOM
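;
; The scalar loop below stores a[i] = b[2*i] + b[2*i+1] for i in [0, 1024).
; The checks verify how the interleaved loads from %b are vectorized on each
; target: SSE and SLM use 4-wide vectors (two <8 x i32> wide loads per
; iteration), AVX1 unrolls that by four, AVX2 uses 8-wide vectors unrolled
; by four, and Atom leaves the loop scalar.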
define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) {
; SSE-LABEL: @foo(
; SSE-NEXT: entry:
; SSE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SSE: vector.ph:
; SSE-NEXT: br label [[VECTOR_BODY:%.*]]
; SSE: vector.body:
; SSE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; SSE-NEXT: [[TMP0:%.*]] = shl i64 [[INDEX]], 1
; SSE-NEXT: [[TMP1:%.*]] = or disjoint i64 [[TMP0]], 8
; SSE-NEXT: [[DOTIDX:%.*]] = shl nsw i64 [[INDEX]], 3
; SSE-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 [[DOTIDX]]
; SSE-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP1]]
; SSE-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP2]], align 4
; SSE-NEXT: [[WIDE_VEC1:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4
; SSE-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; SSE-NEXT: [[STRIDED_VEC2:%.*]] = shufflevector <8 x i32> [[WIDE_VEC1]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; SSE-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; SSE-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <8 x i32> [[WIDE_VEC1]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; SSE-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[STRIDED_VEC3]], [[STRIDED_VEC]]
; SSE-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[STRIDED_VEC4]], [[STRIDED_VEC2]]
; SSE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; SSE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 16
; SSE-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP6]], align 4
; SSE-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP7]], align 4
; SSE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; SSE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; SSE-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; SSE: middle.block:
; SSE-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; SSE: scalar.ph:
; SSE-NEXT: br label [[FOR_BODY:%.*]]
; SSE: for.cond.cleanup:
; SSE-NEXT: ret void
; SSE: for.body:
; SSE-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
;
; AVX1-LABEL: @foo(
; AVX1-NEXT: entry:
; AVX1-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; AVX1: vector.ph:
; AVX1-NEXT: br label [[VECTOR_BODY:%.*]]
; AVX1: vector.body:
; AVX1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; AVX1-NEXT: [[TMP0:%.*]] = shl i64 [[INDEX]], 1
; AVX1-NEXT: [[TMP1:%.*]] = or disjoint i64 [[TMP0]], 8
; AVX1-NEXT: [[TMP2:%.*]] = shl i64 [[INDEX]], 1
; AVX1-NEXT: [[TMP3:%.*]] = or disjoint i64 [[TMP2]], 16
; AVX1-NEXT: [[TMP4:%.*]] = shl i64 [[INDEX]], 1
; AVX1-NEXT: [[TMP5:%.*]] = or disjoint i64 [[TMP4]], 24
; AVX1-NEXT: [[DOTIDX:%.*]] = shl nsw i64 [[INDEX]], 3
; AVX1-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 [[DOTIDX]]
; AVX1-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP1]]
; AVX1-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP3]]
; AVX1-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP5]]
; AVX1-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP6]], align 4
; AVX1-NEXT: [[WIDE_VEC1:%.*]] = load <8 x i32>, ptr [[TMP7]], align 4
; AVX1-NEXT: [[WIDE_VEC2:%.*]] = load <8 x i32>, ptr [[TMP8]], align 4
; AVX1-NEXT: [[WIDE_VEC3:%.*]] = load <8 x i32>, ptr [[TMP9]], align 4
; AVX1-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; AVX1-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <8 x i32> [[WIDE_VEC1]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; AVX1-NEXT: [[STRIDED_VEC5:%.*]] = shufflevector <8 x i32> [[WIDE_VEC2]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; AVX1-NEXT: [[STRIDED_VEC6:%.*]] = shufflevector <8 x i32> [[WIDE_VEC3]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; AVX1-NEXT: [[STRIDED_VEC7:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; AVX1-NEXT: [[STRIDED_VEC8:%.*]] = shufflevector <8 x i32> [[WIDE_VEC1]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; AVX1-NEXT: [[STRIDED_VEC9:%.*]] = shufflevector <8 x i32> [[WIDE_VEC2]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; AVX1-NEXT: [[STRIDED_VEC10:%.*]] = shufflevector <8 x i32> [[WIDE_VEC3]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; AVX1-NEXT: [[TMP10:%.*]] = add nsw <4 x i32> [[STRIDED_VEC7]], [[STRIDED_VEC]]
; AVX1-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[STRIDED_VEC8]], [[STRIDED_VEC4]]
; AVX1-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[STRIDED_VEC9]], [[STRIDED_VEC5]]
; AVX1-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[STRIDED_VEC10]], [[STRIDED_VEC6]]
; AVX1-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; AVX1-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i64 16
; AVX1-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i64 32
; AVX1-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i64 48
; AVX1-NEXT: store <4 x i32> [[TMP10]], ptr [[TMP14]], align 4
; AVX1-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP15]], align 4
; AVX1-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP16]], align 4
; AVX1-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP17]], align 4
; AVX1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; AVX1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; AVX1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; AVX1: middle.block:
; AVX1-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; AVX1: scalar.ph:
; AVX1-NEXT: br label [[FOR_BODY:%.*]]
; AVX1: for.cond.cleanup:
; AVX1-NEXT: ret void
; AVX1: for.body:
; AVX1-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
;
; AVX2-LABEL: @foo(
; AVX2-NEXT: entry:
; AVX2-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; AVX2: vector.ph:
; AVX2-NEXT: br label [[VECTOR_BODY:%.*]]
; AVX2: vector.body:
; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; AVX2-NEXT: [[TMP0:%.*]] = shl i64 [[INDEX]], 1
; AVX2-NEXT: [[TMP1:%.*]] = or disjoint i64 [[TMP0]], 16
; AVX2-NEXT: [[TMP2:%.*]] = shl i64 [[INDEX]], 1
; AVX2-NEXT: [[TMP3:%.*]] = or disjoint i64 [[TMP2]], 32
; AVX2-NEXT: [[TMP4:%.*]] = shl i64 [[INDEX]], 1
; AVX2-NEXT: [[TMP5:%.*]] = or disjoint i64 [[TMP4]], 48
; AVX2-NEXT: [[DOTIDX:%.*]] = shl nsw i64 [[INDEX]], 3
; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 [[DOTIDX]]
; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP1]]
; AVX2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP3]]
; AVX2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP5]]
; AVX2-NEXT: [[WIDE_VEC:%.*]] = load <16 x i32>, ptr [[TMP6]], align 4
; AVX2-NEXT: [[WIDE_VEC1:%.*]] = load <16 x i32>, ptr [[TMP7]], align 4
; AVX2-NEXT: [[WIDE_VEC2:%.*]] = load <16 x i32>, ptr [[TMP8]], align 4
; AVX2-NEXT: [[WIDE_VEC3:%.*]] = load <16 x i32>, ptr [[TMP9]], align 4
; AVX2-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; AVX2-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <16 x i32> [[WIDE_VEC1]], <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; AVX2-NEXT: [[STRIDED_VEC5:%.*]] = shufflevector <16 x i32> [[WIDE_VEC2]], <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; AVX2-NEXT: [[STRIDED_VEC6:%.*]] = shufflevector <16 x i32> [[WIDE_VEC3]], <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; AVX2-NEXT: [[STRIDED_VEC7:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
; AVX2-NEXT: [[STRIDED_VEC8:%.*]] = shufflevector <16 x i32> [[WIDE_VEC1]], <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
; AVX2-NEXT: [[STRIDED_VEC9:%.*]] = shufflevector <16 x i32> [[WIDE_VEC2]], <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
; AVX2-NEXT: [[STRIDED_VEC10:%.*]] = shufflevector <16 x i32> [[WIDE_VEC3]], <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
; AVX2-NEXT: [[TMP10:%.*]] = add nsw <8 x i32> [[STRIDED_VEC7]], [[STRIDED_VEC]]
; AVX2-NEXT: [[TMP11:%.*]] = add nsw <8 x i32> [[STRIDED_VEC8]], [[STRIDED_VEC4]]
; AVX2-NEXT: [[TMP12:%.*]] = add nsw <8 x i32> [[STRIDED_VEC9]], [[STRIDED_VEC5]]
; AVX2-NEXT: [[TMP13:%.*]] = add nsw <8 x i32> [[STRIDED_VEC10]], [[STRIDED_VEC6]]
; AVX2-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; AVX2-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i64 32
; AVX2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i64 64
; AVX2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i64 96
; AVX2-NEXT: store <8 x i32> [[TMP10]], ptr [[TMP14]], align 4
; AVX2-NEXT: store <8 x i32> [[TMP11]], ptr [[TMP15]], align 4
; AVX2-NEXT: store <8 x i32> [[TMP12]], ptr [[TMP16]], align 4
; AVX2-NEXT: store <8 x i32> [[TMP13]], ptr [[TMP17]], align 4
; AVX2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; AVX2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; AVX2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; AVX2: middle.block:
; AVX2-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; AVX2: scalar.ph:
; AVX2-NEXT: br label [[FOR_BODY:%.*]]
; AVX2: for.cond.cleanup:
; AVX2-NEXT: ret void
; AVX2: for.body:
; AVX2-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
;
; ATOM-LABEL: @foo(
; ATOM-NEXT: entry:
; ATOM-NEXT: br label [[FOR_BODY:%.*]]
; ATOM: for.cond.cleanup:
; ATOM-NEXT: ret void
; ATOM: for.body:
; ATOM-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; ATOM-NEXT: [[TMP0:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 1
; ATOM-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]]
; ATOM-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; ATOM-NEXT: [[TMP2:%.*]] = or disjoint i64 [[TMP0]], 1
; ATOM-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP2]]
; ATOM-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4
; ATOM-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP3]], [[TMP1]]
; ATOM-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
; ATOM-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX6]], align 4
; ATOM-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; ATOM-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
; ATOM-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
;
entry:
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %0 = shl nsw i64 %indvars.iv, 1
  %arrayidx = getelementptr inbounds i32, ptr %b, i64 %0
  %1 = load i32, ptr %arrayidx, align 4
  %2 = or disjoint i64 %0, 1
  %arrayidx3 = getelementptr inbounds i32, ptr %b, i64 %2
  %3 = load i32, ptr %arrayidx3, align 4
  %add4 = add nsw i32 %3, %1
  %arrayidx6 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
  store i32 %add4, ptr %arrayidx6, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 1024
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}