; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize,dce,instcombine -mtriple aarch64-linux-gnu -mattr=+sve \
; RUN:   -prefer-predicate-over-epilogue=scalar-epilogue -S %s -o - | FileCheck %s
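
; The tests below exercise conditional loads of loop-invariant addresses with
; SVE: the invariant load sits in a predicated block, so it must be vectorized
; as a masked gather of a splatted pointer rather than speculated. A rough C
; equivalent of this first test (parameter names taken from the IR):
;
;   void cond_inv_load_i32i32i16(int *a, int *cond, short *inv, long n) {
;     for (long i = 0; i < n; i++)
;       if (cond[i])
;         a[i] = *inv; // i16 load of *inv, sign-extended to i32
;   }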
define void @cond_inv_load_i32i32i16(ptr noalias nocapture %a, ptr noalias nocapture readonly %cond, ptr noalias nocapture readonly %inv, i64 %n) #0 {
; CHECK-LABEL: @cond_inv_load_i32i32i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[COND:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> [[BROADCAST_SPLAT]], i32 2, <vscale x 4 x i1> [[TMP6]], <vscale x 4 x i16> poison)
; CHECK-NEXT: [[TMP7:%.*]] = sext <vscale x 4 x i16> [[WIDE_MASKED_GATHER]] to <vscale x 4 x i32>
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP7]], ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP6]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_07:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[I_07]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP10]], 0
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[FOR_INC]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: [[TMP11:%.*]] = load i16, ptr [[INV]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP11]] to i32
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I_07]]
; CHECK-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: br label [[FOR_INC]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_07]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.inc
  %i.07 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, ptr %cond, i64 %i.07
  %0 = load i32, ptr %arrayidx, align 4
  %tobool.not = icmp eq i32 %0, 0
  br i1 %tobool.not, label %for.inc, label %if.then

if.then:                                          ; preds = %for.body
  %1 = load i16, ptr %inv, align 2
  %conv = sext i16 %1 to i32
  %arrayidx1 = getelementptr inbounds i32, ptr %a, i64 %i.07
  store i32 %conv, ptr %arrayidx1, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body, %if.then
  %inc = add nuw nsw i64 %i.07, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %exit, label %for.body, !llvm.loop !0

exit:                                             ; preds = %for.inc
  ret void
}
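
; Same pattern with double elements: where cond[i] > 0.4, the invariant load
; of *inv becomes a masked gather (nxv4f64) under the fcmp mask. Rough C
; equivalent:
;
;   void cond_inv_load_f64f64f64(double *a, double *cond, double *inv, long n) {
;     for (long i = 0; i < n; i++)
;       if (cond[i] > 0.4)
;         a[i] = *inv;
;   }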
define void @cond_inv_load_f64f64f64(ptr noalias nocapture %a, ptr noalias nocapture readonly %cond, ptr noalias nocapture readonly %inv, i64 %n) #0 {
; CHECK-LABEL: @cond_inv_load_f64f64f64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[COND:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x double>, ptr [[TMP5]], align 8
; CHECK-NEXT: [[TMP6:%.*]] = fcmp ogt <vscale x 4 x double> [[WIDE_LOAD]], shufflevector (<vscale x 4 x double> insertelement (<vscale x 4 x double> poison, double 4.000000e-01, i64 0), <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr> [[BROADCAST_SPLAT]], i32 8, <vscale x 4 x i1> [[TMP6]], <vscale x 4 x double> poison)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr double, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4f64.p0(<vscale x 4 x double> [[WIDE_MASKED_GATHER]], ptr [[TMP7]], i32 8, <vscale x 4 x i1> [[TMP6]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[COND]], i64 [[I_08]]
; CHECK-NEXT: [[TMP9:%.*]] = load double, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[CMP1:%.*]] = fcmp ogt double [[TMP9]], 4.000000e-01
; CHECK-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; CHECK: if.then:
; CHECK-NEXT: [[TMP10:%.*]] = load double, ptr [[INV]], align 8
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[I_08]]
; CHECK-NEXT: store double [[TMP10]], ptr [[ARRAYIDX2]], align 8
; CHECK-NEXT: br label [[FOR_INC]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.inc
  %i.08 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds double, ptr %cond, i64 %i.08
  %0 = load double, ptr %arrayidx, align 8
  %cmp1 = fcmp ogt double %0, 4.000000e-01
  br i1 %cmp1, label %if.then, label %for.inc

if.then:                                          ; preds = %for.body
  %1 = load double, ptr %inv, align 8
  %arrayidx2 = getelementptr inbounds double, ptr %a, i64 %i.08
  store double %1, ptr %arrayidx2, align 8
  br label %for.inc

for.inc:                                          ; preds = %for.body, %if.then
  %inc = add nuw nsw i64 %i.08, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %exit, label %for.body, !llvm.loop !0

exit:                                             ; preds = %for.inc
  ret void
}
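
; Here the invariant address (&b[42]) is computed unconditionally in the loop
; body but only dereferenced in the predicated block, so its load is widened to
; a masked gather of the splatted pointer next to the masked load of b[i].
; Rough C equivalent:
;
;   void invariant_load_cond(int *a, int *b, int *cond, long n) {
;     for (long i = 0; i < n; i++)
;       if (cond[i])
;         a[i] = b[42] + b[i];
;   }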
define void @invariant_load_cond(ptr noalias nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %cond, i64 %n) #0 {
; CHECK-LABEL: @invariant_load_cond(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 42
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[TMP5]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[COND:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[BROADCAST_SPLAT]], i32 4, <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP9:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP9]], ptr [[TMP10]], i32 4, <vscale x 4 x i1> [[TMP7]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]]
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[FOR_INC]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 42
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP13]]
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4
; CHECK-NEXT: br label [[FOR_INC]]
; CHECK: for.inc:
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.inc
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.inc ]
  %arrayidx1 = getelementptr inbounds i32, ptr %b, i64 42
  %arrayidx2 = getelementptr inbounds i32, ptr %cond, i64 %iv
  %0 = load i32, ptr %arrayidx2, align 4
  %tobool.not = icmp eq i32 %0, 0
  br i1 %tobool.not, label %for.inc, label %if.then

if.then:                                          ; preds = %for.body
  %arrayidx3 = getelementptr inbounds i32, ptr %b, i64 %iv
  %1 = load i32, ptr %arrayidx3, align 4
  %2 = load i32, ptr %arrayidx1, align 4
  %add = add nsw i32 %2, %1
  %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %iv
  store i32 %add, ptr %arrayidx4, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body, %if.then
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.inc
  ret void
}

attributes #0 = { vscale_range(1, 16) }
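
; Loop hints: force vectorization with a scalable VF of 4 and no interleaving.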
!0 = distinct !{!0, !1, !2, !3, !4, !5}
!1 = !{!"llvm.loop.mustprogress"}
!2 = !{!"llvm.loop.vectorize.width", i32 4}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.interleave.count", i32 1}
!5 = !{!"llvm.loop.vectorize.enable", i1 true}