; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; This is the C++ loop being vectorized in this file:
;   #pragma clang loop vectorize_width(8, scalable) interleave_count(2)
;   for (int i = N-1; i >= 0; --i)
;     a[i] = b[i] + 1.0;
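;
; For context, a minimal sketch of a full C++ function this loop could sit in.
; The function name, signature, and restrict qualifiers are assumptions for
; illustration (restrict mirrors the noalias arguments of the first test
; function below, and the 64-bit induction variable matches the i64 phi in
; the IR); they are not taken from this file:
;
;   void reverse_add(double *__restrict a, double *__restrict b, long N) {
;   #pragma clang loop vectorize_width(8, scalable) interleave_count(2)
;     for (long i = N - 1; i >= 0; --i)
;       a[i] = b[i] + 1.0;
;   }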

; RUN: opt -passes=loop-vectorize,dce,instcombine -mtriple aarch64-linux-gnu -S \
; RUN:   -prefer-predicate-over-epilogue=scalar-epilogue < %s | FileCheck %s
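
; A note on the RUN line: -prefer-predicate-over-epilogue=scalar-epilogue asks
; the vectorizer to emit an ordinary scalar remainder loop rather than folding
; the tail into the vector body with predication, which is why the checks below
; expect a scalar.ph / for.body epilogue after middle.block.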
define void @vector_reverse_f64(i64 %N, ptr noalias %a, ptr noalias %b) #0 {
; CHECK-LABEL: @vector_reverse_f64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP7:%.*]] = icmp sgt i64 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP7]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 3
; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[INDEX]], -1
; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[N]], [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[B:%.*]], i64 [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = sub i64 1, [[TMP5]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[TMP9]], i64 [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = sub i64 0, [[TMP5]]
; CHECK-NEXT: [[TMP13:%.*]] = sub i64 1, [[TMP5]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds double, ptr [[TMP9]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds double, ptr [[TMP14]], i64 [[TMP13]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x double>, ptr [[TMP11]], align 8
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x double>, ptr [[TMP15]], align 8
; CHECK-NEXT: [[TMP16:%.*]] = fadd <vscale x 8 x double> [[WIDE_LOAD]], splat (double 1.000000e+00)
; CHECK-NEXT: [[TMP17:%.*]] = fadd <vscale x 8 x double> [[WIDE_LOAD1]], splat (double 1.000000e+00)
; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[TMP8]]
; CHECK-NEXT: [[TMP19:%.*]] = sub i64 1, [[TMP5]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds double, ptr [[TMP18]], i64 [[TMP19]]
; CHECK-NEXT: [[TMP21:%.*]] = sub i64 0, [[TMP5]]
; CHECK-NEXT: [[TMP22:%.*]] = sub i64 1, [[TMP5]]
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds double, ptr [[TMP18]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds double, ptr [[TMP23]], i64 [[TMP22]]
; CHECK-NEXT: store <vscale x 8 x double> [[TMP16]], ptr [[TMP20]], align 8
; CHECK-NEXT: store <vscale x 8 x double> [[TMP17]], ptr [[TMP24]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_MOD_VF]], [[MIDDLE_BLOCK]] ], [ [[N]], [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup.loopexit:
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_08_IN:%.*]] = phi i64 [ [[I_08:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I_08]] = add nsw i64 [[I_08_IN]], -1
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[I_08]]
; CHECK-NEXT: [[TMP26:%.*]] = load double, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP26]], 1.000000e+00
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[I_08]]
; CHECK-NEXT: store double [[ADD]], ptr [[ARRAYIDX1]], align 8
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[I_08_IN]], 1
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]], !llvm.loop [[LOOP4:![0-9]+]]
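;
; Reading the vector body above: the two <vscale x 8 x double> loads and stores
; per iteration come from interleave_count(2); the addresses are computed at
; negative offsets from b[N-1-index] and a[N-1-index] because the loop runs
; backwards; and the index advances by 16 * vscale elements per iteration
; (the scalable VF of 8 times the interleave count of 2).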
;
entry:
  %cmp7 = icmp sgt i64 %N, 0
  br i1 %cmp7, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %i.08.in = phi i64 [ %i.08, %for.body ], [ %N, %entry ]
  %i.08 = add nsw i64 %i.08.in, -1
  %arrayidx = getelementptr inbounds double, ptr %b, i64 %i.08
  %0 = load double, ptr %arrayidx, align 8
  %add = fadd double %0, 1.000000e+00
  %arrayidx1 = getelementptr inbounds double, ptr %a, i64 %i.08
  store double %add, ptr %arrayidx1, align 8
  %cmp = icmp sgt i64 %i.08.in, 1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}
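
; The second test runs the same loop over i64 elements, but %a and %b are not
; marked noalias, so the vectorizer must guard the vector loop with a runtime
; aliasing check (the vector.memcheck block in the checks below).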
define void @vector_reverse_i64(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-LABEL: @vector_reverse_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A2:%.*]] = ptrtoint ptr [[A:%.*]] to i64
; CHECK-NEXT: [[B1:%.*]] = ptrtoint ptr [[B:%.*]] to i64
; CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i64 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP8]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 7
; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]]
; CHECK-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 3
; CHECK-NEXT: [[TMP9:%.*]] = shl i64 [[TMP7]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[INDEX]], -1
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[N]], [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = sub i64 1, [[TMP8]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[TMP12]], i64 [[TMP13]]
; CHECK-NEXT: [[TMP15:%.*]] = sub i64 0, [[TMP8]]
; CHECK-NEXT: [[TMP16:%.*]] = sub i64 1, [[TMP8]]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i64, ptr [[TMP12]], i64 [[TMP15]]
; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i64, ptr [[TMP17]], i64 [[TMP16]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i64>, ptr [[TMP14]], align 8
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x i64>, ptr [[TMP18]], align 8
; CHECK-NEXT: [[TMP19:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD]], splat (i64 1)
; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD3]], splat (i64 1)
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP22:%.*]] = sub i64 1, [[TMP8]]
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i64 [[TMP22]]
; CHECK-NEXT: [[TMP24:%.*]] = sub i64 0, [[TMP8]]
; CHECK-NEXT: [[TMP25:%.*]] = sub i64 1, [[TMP8]]
; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i64 [[TMP24]]
; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i64, ptr [[TMP26]], i64 [[TMP25]]
; CHECK-NEXT: store <vscale x 8 x i64> [[TMP19]], ptr [[TMP23]], align 8
; CHECK-NEXT: store <vscale x 8 x i64> [[TMP20]], ptr [[TMP27]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_MOD_VF]], [[MIDDLE_BLOCK]] ], [ [[N]], [[FOR_BODY_PREHEADER]] ], [ [[N]], [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup.loopexit:
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_09_IN:%.*]] = phi i64 [ [[I_09:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I_09]] = add nsw i64 [[I_09_IN]], -1
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[I_09]]
; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[TMP29]], 1
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[I_09]]
; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX2]], align 8
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[I_09_IN]], 1
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]], !llvm.loop [[LOOP6:![0-9]+]]
;
entry:
  %cmp8 = icmp sgt i64 %N, 0
  br i1 %cmp8, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %i.09.in = phi i64 [ %i.09, %for.body ], [ %N, %entry ]
  %i.09 = add nsw i64 %i.09.in, -1
  %arrayidx = getelementptr inbounds i64, ptr %b, i64 %i.09
  %0 = load i64, ptr %arrayidx, align 8
  %add = add i64 %0, 1
  %arrayidx2 = getelementptr inbounds i64, ptr %a, i64 %i.09
  store i64 %add, ptr %arrayidx2, align 8
  %cmp = icmp sgt i64 %i.09.in, 1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}

attributes #0 = { "target-cpu"="generic" "target-features"="+neon,+sve" }
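
; The !0 loop metadata below is what the pragma in the header comment lowers
; to: vectorize.width 8 together with vectorize.scalable.enable true requests
; the scalable VF of 8, and interleave.count 2 requests the two-way
; interleaving seen in the vector bodies above.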
!0 = distinct !{!0, !1, !2, !3, !4, !5}
!1 = !{!"llvm.loop.mustprogress"}
!2 = !{!"llvm.loop.vectorize.width", i32 8}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.vectorize.enable", i1 true}
!5 = !{!"llvm.loop.interleave.count", i32 2}