; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; This is the C++ loop being vectorized in this file with
; experimental.vector.reverse:
;   #pragma clang loop vectorize_width(8, scalable)
;   for (int i = N-1; i >= 0; --i)
;     a[i] = b[i] + 1.0;
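;
; The second test below (@vector_reverse_i64) exercises the same pattern on
; 64-bit integers; a plausible C++ source for it (reconstructed from the IR
; in this file, not part of the original comment) would be:
;   #pragma clang loop vectorize_width(8, scalable)
;   for (long i = N-1; i >= 0; --i)
;     a[i] = b[i] + 1;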

; RUN: opt -loop-vectorize -scalable-vectorization=on -dce -instcombine -mtriple aarch64-linux-gnu -S < %s | FileCheck %s

define void @vector_reverse_f64(i64 %N, double* %a, double* %b) #0 {
; CHECK-LABEL: @vector_reverse_f64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP7:%.*]] = icmp sgt i64 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP7]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 3
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, double* [[A:%.*]], i64 [[N]]
; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr double, double* [[B:%.*]], i64 [[N]]
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt double* [[SCEVGEP4]], [[A]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt double* [[SCEVGEP]], [[B]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 3
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[INDEX]], -1
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], [[N]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul i32 [[TMP6]], -8
; CHECK-NEXT: [[TMP7:%.*]] = or i32 [[DOTNEG]], 1
; CHECK-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64
; CHECK-NEXT: [[DOTIDX:%.*]] = add nsw i64 [[TMP5]], [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, double* [[B]], i64 [[DOTIDX]]
; CHECK-NEXT: [[TMP10:%.*]] = bitcast double* [[TMP9]] to <vscale x 8 x double>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x double>, <vscale x 8 x double>* [[TMP10]], align 8, !alias.scope !0
; CHECK-NEXT: [[REVERSE:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.reverse.nxv8f64(<vscale x 8 x double> [[WIDE_LOAD]])
; CHECK-NEXT: [[TMP11:%.*]] = fadd <vscale x 8 x double> [[REVERSE]], shufflevector (<vscale x 8 x double> insertelement (<vscale x 8 x double> poison, double 1.000000e+00, i32 0), <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-NEXT: [[REVERSE6:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.reverse.nxv8f64(<vscale x 8 x double> [[TMP11]])
; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[DOTNEG7:%.*]] = mul i32 [[TMP12]], -8
; CHECK-NEXT: [[TMP13:%.*]] = or i32 [[DOTNEG7]], 1
; CHECK-NEXT: [[TMP14:%.*]] = sext i32 [[TMP13]] to i64
; CHECK-NEXT: [[DOTIDX8:%.*]] = add nsw i64 [[TMP5]], [[TMP14]]
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[DOTIDX8]]
; CHECK-NEXT: [[TMP16:%.*]] = bitcast double* [[TMP15]] to <vscale x 8 x double>*
; CHECK-NEXT: store <vscale x 8 x double> [[REVERSE6]], <vscale x 8 x double>* [[TMP16]], align 8, !alias.scope !3, !noalias !0
; CHECK-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP18:%.*]] = shl i64 [[TMP17]], 3
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_MOD_VF]], [[MIDDLE_BLOCK]] ], [ [[N]], [[FOR_BODY_PREHEADER]] ], [ [[N]], [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup.loopexit:
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_08_IN:%.*]] = phi i64 [ [[I_08:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I_08]] = add nsw i64 [[I_08_IN]], -1
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[B]], i64 [[I_08]]
; CHECK-NEXT: [[TMP20:%.*]] = load double, double* [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP20]], 1.000000e+00
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[I_08]]
; CHECK-NEXT: store double [[ADD]], double* [[ARRAYIDX1]], align 8
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[I_08_IN]], 1
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]], !llvm.loop [[LOOP8:![0-9]+]]
;
entry:
  %cmp7 = icmp sgt i64 %N, 0
  br i1 %cmp7, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %i.08.in = phi i64 [ %i.08, %for.body ], [ %N, %entry ]
  %i.08 = add nsw i64 %i.08.in, -1
  %arrayidx = getelementptr inbounds double, double* %b, i64 %i.08
  %0 = load double, double* %arrayidx, align 8
  %add = fadd double %0, 1.000000e+00
  %arrayidx1 = getelementptr inbounds double, double* %a, i64 %i.08
  store double %add, double* %arrayidx1, align 8
  %cmp = icmp sgt i64 %i.08.in, 1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}

define void @vector_reverse_i64(i64 %N, i64* %a, i64* %b) #0 {
; CHECK-LABEL: @vector_reverse_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i64 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP8]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 3
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i64, i64* [[A:%.*]], i64 [[N]]
; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[N]]
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt i64* [[SCEVGEP4]], [[A]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt i64* [[SCEVGEP]], [[B]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 3
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[INDEX]], -1
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], [[N]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul i32 [[TMP6]], -8
; CHECK-NEXT: [[TMP7:%.*]] = or i32 [[DOTNEG]], 1
; CHECK-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64
; CHECK-NEXT: [[DOTIDX:%.*]] = add nsw i64 [[TMP5]], [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[DOTIDX]]
; CHECK-NEXT: [[TMP10:%.*]] = bitcast i64* [[TMP9]] to <vscale x 8 x i64>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i64>, <vscale x 8 x i64>* [[TMP10]], align 8, !alias.scope !9
; CHECK-NEXT: [[REVERSE:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.reverse.nxv8i64(<vscale x 8 x i64> [[WIDE_LOAD]])
; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 8 x i64> [[REVERSE]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i32 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-NEXT: [[REVERSE6:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.reverse.nxv8i64(<vscale x 8 x i64> [[TMP11]])
; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[DOTNEG7:%.*]] = mul i32 [[TMP12]], -8
; CHECK-NEXT: [[TMP13:%.*]] = or i32 [[DOTNEG7]], 1
; CHECK-NEXT: [[TMP14:%.*]] = sext i32 [[TMP13]] to i64
; CHECK-NEXT: [[DOTIDX8:%.*]] = add nsw i64 [[TMP5]], [[TMP14]]
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[DOTIDX8]]
; CHECK-NEXT: [[TMP16:%.*]] = bitcast i64* [[TMP15]] to <vscale x 8 x i64>*
; CHECK-NEXT: store <vscale x 8 x i64> [[REVERSE6]], <vscale x 8 x i64>* [[TMP16]], align 8, !alias.scope !12, !noalias !9
; CHECK-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP18:%.*]] = shl i64 [[TMP17]], 3
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_MOD_VF]], [[MIDDLE_BLOCK]] ], [ [[N]], [[FOR_BODY_PREHEADER]] ], [ [[N]], [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup.loopexit:
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[I_09_IN:%.*]] = phi i64 [ [[I_09:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I_09]] = add nsw i64 [[I_09_IN]], -1
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[I_09]]
; CHECK-NEXT: [[TMP20:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[TMP20]], 1
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[I_09]]
; CHECK-NEXT: store i64 [[ADD]], i64* [[ARRAYIDX2]], align 8
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[I_09_IN]], 1
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]], !llvm.loop [[LOOP15:![0-9]+]]
;
entry:
  %cmp8 = icmp sgt i64 %N, 0
  br i1 %cmp8, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %i.09.in = phi i64 [ %i.09, %for.body ], [ %N, %entry ]
  %i.09 = add nsw i64 %i.09.in, -1
  %arrayidx = getelementptr inbounds i64, i64* %b, i64 %i.09
  %0 = load i64, i64* %arrayidx, align 8
  %add = add i64 %0, 1
  %arrayidx2 = getelementptr inbounds i64, i64* %a, i64 %i.09
  store i64 %add, i64* %arrayidx2, align 8
  %cmp = icmp sgt i64 %i.09.in, 1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}

attributes #0 = { "target-cpu"="generic" "target-features"="+neon,+sve" }

!0 = distinct !{!0, !1, !2, !3, !4}
!1 = !{!"llvm.loop.mustprogress"}
!2 = !{!"llvm.loop.vectorize.width", i32 8}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.vectorize.enable", i1 true}