; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=CHECK

; Exercise tail folding on RISCV w/scalable vectors.

target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
target triple = "riscv64"
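
; The scalar loops below all share the same overall shape; as a rough C
; sketch (for orientation only, not part of the test input):
;
;   for (long i = 0; i < 1025; i++)
;     a[i] += v;   // or a store/gather/scatter variant with the same trip count
;
; Because the RUN line passes -prefer-predicate-over-epilogue=predicate-dont-vectorize,
; the vectorizer may only vectorize these loops by folding the tail into the
; main loop, i.e. by predicating the memory accesses with
; @llvm.get.active.lane.mask instead of emitting a scalar epilogue.
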
define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @vector_add(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP5]], i64 1025)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i32 0
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
; CHECK-NEXT: [[TMP8:%.*]] = add <vscale x 2 x i64> [[WIDE_MASKED_LOAD]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP8]], ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 2
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP10]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i64, ptr %a, i64 %iv
  %elem = load i64, ptr %arrayidx
  %add = add i64 %elem, %v
  store i64 %add, ptr %arrayidx
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1025
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret void
}

; a[b[i]] = v, exercise scatter support
define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %v, i64 %n) {
; CHECK-LABEL: @indexed_store(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP5]], i64 1025)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i32 0
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], <vscale x 2 x i64> [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x ptr> [[TMP8]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 2
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP10]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
; CHECK-NEXT: store i64 [[V]], ptr [[AADDR]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %baddr = getelementptr inbounds i64, ptr %b, i64 %iv
  %aidx = load i64, ptr %baddr
  %aaddr = getelementptr inbounds i64, ptr %a, i64 %aidx
  store i64 %v, ptr %aaddr
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1025
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret void
}

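; sum += a[b[i]], exercise gather support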
define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %v, i64 %n) {
; CHECK-LABEL: @indexed_load(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP5]], i64 1025)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i32 0
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], <vscale x 2 x i64> [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> [[TMP8]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
; CHECK-NEXT: [[TMP9]] = add <vscale x 2 x i64> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> [[TMP9]], <vscale x 2 x i64> [[VEC_PHI]]
; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 2
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP12]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP10]])
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[AADDR]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]]
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]]
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum = phi i64 [ 0, %entry ], [ %sum.next, %for.body ]
  %baddr = getelementptr inbounds i64, ptr %b, i64 %iv
  %aidx = load i64, ptr %baddr
  %aaddr = getelementptr inbounds i64, ptr %a, i64 %aidx
  %elem = load i64, ptr %aaddr
  %iv.next = add nuw nsw i64 %iv, 1
  %sum.next = add i64 %sum, %elem
  %exitcond.not = icmp eq i64 %iv.next, 1025
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret i64 %sum.next
}

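; a[i] = v, exercise splat of a scalar value into memory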
define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @splat_int(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP5]], i64 1025)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i32 0
; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 2
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP9]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i64, ptr %a, i64 %iv
  store i64 %v, ptr %arrayidx
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1025
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret void
}

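; *b = v and a[i] = v, exercise a store to a loop-invariant address alongside a contiguous store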
define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %v, i64 %n) {
; CHECK-LABEL: @uniform_store(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]]
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP5]], i64 1025)
; CHECK-NEXT: store i64 [[V]], ptr [[B:%.*]], align 8
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i32 0
; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 2
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP9]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: store i64 [[V]], ptr [[B]], align 8
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  store i64 %v, ptr %b, align 8
  %arrayidx = getelementptr inbounds i64, ptr %a, i64 %iv
  store i64 %v, ptr %arrayidx
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1025
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret void
}

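; v = *b (loop-invariant load) with a[i] = v; per the checks below this loop is currently left unvectorized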
define i64 @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %n) {
; CHECK-LABEL: @uniform_load(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[B:%.*]], align 8
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[IV]]
; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i64 [[V_LCSSA]]
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %v = load i64, ptr %b, align 8
  %arrayidx = getelementptr inbounds i64, ptr %a, i64 %iv
  store i64 %v, ptr %arrayidx
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1025
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret i64 %v
}

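; Same as @vector_add, but with a trip count of 1024 rather than 1025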
define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @vector_add_trip1024(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP4]]
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP5]], i64 1024)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i32 0
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
; CHECK-NEXT: [[TMP8:%.*]] = add <vscale x 2 x i64> [[WIDE_MASKED_LOAD]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP8]], ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 2
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP10]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i64, ptr %a, i64 %iv
  %elem = load i64, ptr %arrayidx
  %add = add i64 %elem, %v
  store i64 %add, ptr %arrayidx
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1024
  br i1 %exitcond.not, label %for.end, label %for.body