; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=loop-vectorize -mtriple=riscv64 -mattr=+v -S | FileCheck --check-prefixes=CHECK,NOSTRIDED %s
; RUN: opt < %s -passes=loop-vectorize -mtriple=riscv64 -mattr=+v -lv-strided-pointer-ivs=true -laa-speculate-unit-stride=false -S | FileCheck --check-prefixes=CHECK,STRIDED %s
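
; Constant stride of 8 (in i32 elements), materialized by scaling the
; induction variable each iteration.  Both RUN lines vectorize this with a
; widened IV feeding a masked gather/scatter.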
define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-LABEL: @single_constant_stride_int_scaled(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 1024, [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 4 x i64> [[TMP6]], zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i64> [[TMP7]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 4
; CHECK-NEXT: [[TMP11:%.*]] = mul i64 1, [[TMP10]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP11]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP12:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 8, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[TMP12]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP13]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP14]], <vscale x 4 x ptr> [[TMP13]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], 8
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]

  %offset = mul nsw nuw i64 %i, 8
  %q0 = getelementptr i32, ptr %p, i64 %offset
  %x0 = load i32, ptr %q0
  %y0 = add i32 %x0, 1
  store i32 %y0, ptr %q0

  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
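
; The same access pattern, but with the constant stride (64 here) carried in
; a dedicated integer induction variable.  Again vectorized with a
; gather/scatter under both RUN lines.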
define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-LABEL: @single_constant_stride_int_iv(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 64
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP5:%.*]] = add <vscale x 4 x i64> [[TMP4]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = mul <vscale x 4 x i64> [[TMP5]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 64, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 4
; CHECK-NEXT: [[TMP9:%.*]] = mul i64 64, [[TMP8]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP10]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP11]], <vscale x 4 x ptr> [[TMP10]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP13]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; CHECK-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], 64
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]
  %offset = phi i64 [0, %entry], [%offset.next, %loop]

  %q0 = getelementptr i32, ptr %p, i64 %offset
  %x0 = load i32, ptr %q0
  %y0 = add i32 %x0, 1
  store i32 %y0, ptr %q0

  %offset.next = add nsw nuw i64 %offset, 64
  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
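
; Constant-stride pointer induction variable advancing 8 bytes per iteration.
; This one is handled as an interleaved access: a wide fixed-width load plus a
; strided shufflevector for the read, and a scatter for the write-back.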
define void @single_constant_stride_ptr_iv(ptr %p) {
; CHECK-LABEL: @single_constant_stride_ptr_iv(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 8128
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <8 x i64> <i64 0, i64 8, i64 16, i64 24, i64 32, i64 40, i64 48, i64 56>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <8 x ptr> [[TMP0]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[TMP1]], i32 0
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; CHECK-NEXT: [[TMP3:%.*]] = add <8 x i32> [[STRIDED_VEC]], <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP3]], <8 x ptr> [[TMP0]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 64
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1016
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1016, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[P]], [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[PTR]], align 4
; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; CHECK-NEXT: store i32 [[Y0]], ptr [[PTR]], align 4
; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]
  %ptr = phi ptr [%p, %entry], [%ptr.next, %loop]

  %x0 = load i32, ptr %ptr
  %y0 = add i32 %x0, 1
  store i32 %y0, ptr %ptr

  %ptr.next = getelementptr inbounds i8, ptr %ptr, i64 8
  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
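
; From here on the stride is a runtime value.  The default (NOSTRIDED) run
; versions the loop on stride == 1 (vector.scevcheck) and vectorizes the
; unit-stride copy with contiguous loads/stores; the STRIDED run passes
; -laa-speculate-unit-stride=false and leaves the loop scalar.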
define void @single_stride_int_scaled(ptr %p, i64 %stride) {
; NOSTRIDED-LABEL: @single_stride_int_scaled(
; NOSTRIDED-NEXT: entry:
; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; NOSTRIDED-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
; NOSTRIDED-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP5]]
; NOSTRIDED-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[TMP6]], i32 0
; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NOSTRIDED-NEXT: [[TMP8:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP8]], ptr [[TMP7]], align 4
; NOSTRIDED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 4
; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; NOSTRIDED: middle.block:
; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; NOSTRIDED: scalar.ph:
; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE]]
; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
; STRIDED-LABEL: @single_stride_int_scaled(
; STRIDED-NEXT: entry:
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
; STRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE:%.*]]
; STRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET]]
; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; STRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; STRIDED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
; STRIDED: exit:
; STRIDED-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]

  %offset = mul nsw nuw i64 %i, %stride
  %q0 = getelementptr i32, ptr %p, i64 %offset
  %x0 = load i32, ptr %q0
  %y0 = add i32 %x0, 1
  store i32 %y0, ptr %q0

  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
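
; Runtime stride carried in a separate integer induction variable.  Same
; outcome as above: NOSTRIDED versions on stride == 1, STRIDED stays scalar.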
define void @single_stride_int_iv(ptr %p, i64 %stride) {
; NOSTRIDED-LABEL: @single_stride_int_iv(
; NOSTRIDED-NEXT: entry:
; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; NOSTRIDED-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; NOSTRIDED-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
; NOSTRIDED-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP5]]
; NOSTRIDED-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[TMP6]], i32 0
; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NOSTRIDED-NEXT: [[TMP8:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP8]], ptr [[TMP7]], align 4
; NOSTRIDED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 4
; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; NOSTRIDED: middle.block:
; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; NOSTRIDED: scalar.ph:
; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE]]
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP11:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
; STRIDED-LABEL: @single_stride_int_iv(
; STRIDED-NEXT: entry:
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
; STRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET]]
; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; STRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; STRIDED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; STRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE:%.*]]
; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
; STRIDED: exit:
; STRIDED-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]
  %offset = phi i64 [0, %entry], [%offset.next, %loop]

  %q0 = getelementptr i32, ptr %p, i64 %offset
  %x0 = load i32, ptr %q0
  %y0 = add i32 %x0, 1
  store i32 %y0, ptr %q0

  %offset.next = add nsw nuw i64 %offset, %stride
  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
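
; Runtime-stride pointer induction variable, loading and storing through the
; same pointer.  Neither run vectorizes this case; presumably no runtime check
; can rule out the loop-carried dependence through %ptr (e.g. a zero stride).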
define void @single_stride_ptr_iv(ptr %p, i64 %stride) {
; CHECK-LABEL: @single_stride_ptr_iv(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[P:%.*]], [[ENTRY]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[PTR]], align 4
; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; CHECK-NEXT: store i32 [[Y0]], ptr [[PTR]], align 4
; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[STRIDE:%.*]]
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]
  %ptr = phi ptr [%p, %entry], [%ptr.next, %loop]

  %x0 = load i32, ptr %ptr
  %y0 = add i32 %x0, 1
  store i32 %y0, ptr %ptr

  %ptr.next = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
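
; Two objects indexed with the same runtime scaled stride.  NOSTRIDED versions
; on stride == 1 and adds a pointer-difference memcheck; STRIDED keeps the
; runtime stride and emits a gather/scatter guarded by overlap checks.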
define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-LABEL: @double_stride_int_scaled(
; NOSTRIDED-NEXT: entry:
; NOSTRIDED-NEXT: [[P3:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; NOSTRIDED-NEXT: [[P21:%.*]] = ptrtoint ptr [[P2:%.*]] to i64
; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]]
; NOSTRIDED: vector.memcheck:
; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
; NOSTRIDED-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
; NOSTRIDED-NEXT: [[TMP6:%.*]] = sub i64 [[P21]], [[P3]]
; NOSTRIDED-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
; NOSTRIDED-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
; NOSTRIDED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 4
; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP8]]
; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; NOSTRIDED-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 0
; NOSTRIDED-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[P]], i64 [[TMP9]]
; NOSTRIDED-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP10]], i32 0
; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP11]], align 4
; NOSTRIDED-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; NOSTRIDED-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[P2]], i64 [[TMP9]]
; NOSTRIDED-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP13]], i32 0
; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP14]], align 4
; NOSTRIDED-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 4
; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]]
; NOSTRIDED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; NOSTRIDED: middle.block:
; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; NOSTRIDED: scalar.ph:
; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE]]
; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; NOSTRIDED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P2]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP13:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
; STRIDED-LABEL: @double_stride_int_scaled(
; STRIDED-NEXT: entry:
; STRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; STRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 24, i64 [[TMP1]])
; STRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
; STRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; STRIDED: vector.memcheck:
; STRIDED-NEXT: [[TMP3:%.*]] = mul i64 [[STRIDE:%.*]], 4092
; STRIDED-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P2:%.*]], i64 [[TMP3]]
; STRIDED-NEXT: [[TMP4:%.*]] = icmp ult ptr [[P2]], [[SCEVGEP]]
; STRIDED-NEXT: [[UMIN:%.*]] = select i1 [[TMP4]], ptr [[P2]], ptr [[SCEVGEP]]
; STRIDED-NEXT: [[TMP5:%.*]] = icmp ugt ptr [[P2]], [[SCEVGEP]]
; STRIDED-NEXT: [[UMAX:%.*]] = select i1 [[TMP5]], ptr [[P2]], ptr [[SCEVGEP]]
; STRIDED-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[UMAX]], i64 4
; STRIDED-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP3]]
; STRIDED-NEXT: [[TMP6:%.*]] = icmp ult ptr [[P]], [[SCEVGEP2]]
; STRIDED-NEXT: [[UMIN3:%.*]] = select i1 [[TMP6]], ptr [[P]], ptr [[SCEVGEP2]]
; STRIDED-NEXT: [[TMP7:%.*]] = icmp ugt ptr [[P]], [[SCEVGEP2]]
; STRIDED-NEXT: [[UMAX4:%.*]] = select i1 [[TMP7]], ptr [[P]], ptr [[SCEVGEP2]]
; STRIDED-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[UMAX4]], i64 4
; STRIDED-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[UMIN]], [[SCEVGEP5]]
; STRIDED-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[UMIN3]], [[SCEVGEP1]]
; STRIDED-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; STRIDED-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; STRIDED: vector.ph:
; STRIDED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 4
; STRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP9]]
; STRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; STRIDED-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
; STRIDED-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i64> [[TMP10]], zeroinitializer
; STRIDED-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i64> [[TMP11]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
; STRIDED-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP12]]
; STRIDED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], 4
; STRIDED-NEXT: [[TMP15:%.*]] = mul i64 1, [[TMP14]]
; STRIDED-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP15]], i64 0
; STRIDED-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
; STRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED: vector.body:
; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[TMP16:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; STRIDED-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP16]]
; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP17]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> poison), !alias.scope !8
; STRIDED-NEXT: [[TMP18:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; STRIDED-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[P2]], <vscale x 4 x i64> [[TMP16]]
; STRIDED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP18]], <vscale x 4 x ptr> [[TMP19]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)), !alias.scope !11, !noalias !8
; STRIDED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 4
; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP21]]
; STRIDED-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; STRIDED-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; STRIDED-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; STRIDED: middle.block:
; STRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; STRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; STRIDED: scalar.ph:
; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
; STRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE]]
; STRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; STRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; STRIDED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P2]], i64 [[OFFSET]]
; STRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
; STRIDED: exit:
; STRIDED-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]

  %offset = mul nsw nuw i64 %i, %stride
  %q0 = getelementptr i32, ptr %p, i64 %offset
  %x0 = load i32, ptr %q0
  %y0 = add i32 %x0, 1
  %q1 = getelementptr i32, ptr %p2, i64 %offset
  store i32 %y0, ptr %q1

  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
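
; Despite the name, both the load and the store here index %p (%p2 is unused),
; so this is effectively a single-object variant.  NOSTRIDED versions on
; stride == 1 with no memcheck; STRIDED leaves the loop scalar.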
define void @double_stride_int_iv(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-LABEL: @double_stride_int_iv(
; NOSTRIDED-NEXT: entry:
; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; NOSTRIDED-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; NOSTRIDED-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
; NOSTRIDED-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP5]]
; NOSTRIDED-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[TMP6]], i32 0
; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NOSTRIDED-NEXT: [[TMP8:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP8]], ptr [[TMP7]], align 4
; NOSTRIDED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 4
; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; NOSTRIDED: middle.block:
; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; NOSTRIDED: scalar.ph:
; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; NOSTRIDED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
; NOSTRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE]]
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
; STRIDED-LABEL: @double_stride_int_iv(
; STRIDED-NEXT: entry:
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
; STRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET]]
; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; STRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; STRIDED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; STRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
; STRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE:%.*]]
; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
; STRIDED: exit:
; STRIDED-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]
  %offset = phi i64 [0, %entry], [%offset.next, %loop]

  %q0 = getelementptr i32, ptr %p, i64 %offset
  %x0 = load i32, ptr %q0
  %y0 = add i32 %x0, 1
  %q1 = getelementptr i32, ptr %p, i64 %offset
  store i32 %y0, ptr %q1

  %offset.next = add nsw nuw i64 %offset, %stride
  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
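
; Two runtime-stride pointer induction variables.  NOSTRIDED leaves this
; scalar; with -lv-strided-pointer-ivs=true the STRIDED run vectorizes it,
; building vector GEPs for both pointer IVs behind runtime overlap checks.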
define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-LABEL: @double_stride_ptr_iv(
; NOSTRIDED-NEXT: entry:
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[PTR:%.*]] = phi ptr [ [[P:%.*]], [[ENTRY]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[PTR2:%.*]] = phi ptr [ [[P2:%.*]], [[ENTRY]] ], [ [[PTR2_NEXT:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[PTR]], align 4
; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[PTR2]], align 4
; NOSTRIDED-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[STRIDE:%.*]]
; NOSTRIDED-NEXT: [[PTR2_NEXT]] = getelementptr inbounds i8, ptr [[PTR2]], i64 [[STRIDE]]
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
; STRIDED-LABEL: @double_stride_ptr_iv(
; STRIDED-NEXT: entry:
; STRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; STRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
; STRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
; STRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; STRIDED: vector.memcheck:
; STRIDED-NEXT: [[TMP3:%.*]] = mul i64 [[STRIDE:%.*]], 1023
; STRIDED-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P2:%.*]], i64 [[TMP3]]
; STRIDED-NEXT: [[TMP4:%.*]] = icmp ult ptr [[P2]], [[SCEVGEP]]
; STRIDED-NEXT: [[UMIN:%.*]] = select i1 [[TMP4]], ptr [[P2]], ptr [[SCEVGEP]]
; STRIDED-NEXT: [[TMP5:%.*]] = icmp ugt ptr [[P2]], [[SCEVGEP]]
; STRIDED-NEXT: [[UMAX:%.*]] = select i1 [[TMP5]], ptr [[P2]], ptr [[SCEVGEP]]
; STRIDED-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[UMAX]], i64 4
; STRIDED-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP3]]
; STRIDED-NEXT: [[TMP6:%.*]] = icmp ult ptr [[P]], [[SCEVGEP2]]
; STRIDED-NEXT: [[UMIN3:%.*]] = select i1 [[TMP6]], ptr [[P]], ptr [[SCEVGEP2]]
; STRIDED-NEXT: [[TMP7:%.*]] = icmp ugt ptr [[P]], [[SCEVGEP2]]
; STRIDED-NEXT: [[UMAX4:%.*]] = select i1 [[TMP7]], ptr [[P]], ptr [[SCEVGEP2]]
; STRIDED-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[UMAX4]], i64 4
; STRIDED-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[UMIN]], [[SCEVGEP5]]
; STRIDED-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[UMIN3]], [[SCEVGEP1]]
; STRIDED-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; STRIDED-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; STRIDED: vector.ph:
; STRIDED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 4
; STRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP9]]
; STRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; STRIDED-NEXT: [[TMP10:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
; STRIDED-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP10]]
; STRIDED-NEXT: [[TMP11:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
; STRIDED-NEXT: [[IND_END7:%.*]] = getelementptr i8, ptr [[P2]], i64 [[TMP11]]
; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED: vector.body:
; STRIDED-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[POINTER_PHI11:%.*]] = phi ptr [ [[P2]], [[VECTOR_PH]] ], [ [[PTR_IND12:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 4
; STRIDED-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], 1
; STRIDED-NEXT: [[TMP15:%.*]] = mul i64 [[STRIDE]], [[TMP14]]
; STRIDED-NEXT: [[TMP16:%.*]] = mul i64 [[TMP13]], 0
; STRIDED-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP16]], i64 0
; STRIDED-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP17:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
; STRIDED-NEXT: [[TMP18:%.*]] = add <vscale x 4 x i64> [[DOTSPLAT]], [[TMP17]]
; STRIDED-NEXT: [[DOTSPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
; STRIDED-NEXT: [[DOTSPLAT10:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[VECTOR_GEP:%.*]] = mul <vscale x 4 x i64> [[TMP18]], [[DOTSPLAT10]]
; STRIDED-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[VECTOR_GEP]]
; STRIDED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 4
; STRIDED-NEXT: [[TMP22:%.*]] = mul i64 [[TMP21]], 1
; STRIDED-NEXT: [[TMP23:%.*]] = mul i64 [[STRIDE]], [[TMP22]]
; STRIDED-NEXT: [[TMP24:%.*]] = mul i64 [[TMP21]], 0
; STRIDED-NEXT: [[DOTSPLATINSERT13:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP24]], i64 0
; STRIDED-NEXT: [[DOTSPLAT14:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT13]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP25:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
; STRIDED-NEXT: [[TMP26:%.*]] = add <vscale x 4 x i64> [[DOTSPLAT14]], [[TMP25]]
; STRIDED-NEXT: [[VECTOR_GEP17:%.*]] = mul <vscale x 4 x i64> [[TMP26]], [[DOTSPLAT10]]
; STRIDED-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[POINTER_PHI11]], <vscale x 4 x i64> [[VECTOR_GEP17]]
; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP19]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> poison), !alias.scope !15
; STRIDED-NEXT: [[TMP28:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; STRIDED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP28]], <vscale x 4 x ptr> [[TMP27]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)), !alias.scope !18, !noalias !15
; STRIDED-NEXT: [[TMP29:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP30:%.*]] = mul i64 [[TMP29]], 4
; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP30]]
; STRIDED-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP15]]
; STRIDED-NEXT: [[PTR_IND12]] = getelementptr i8, ptr [[POINTER_PHI11]], i64 [[TMP23]]
; STRIDED-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; STRIDED-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; STRIDED: middle.block:
; STRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; STRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; STRIDED: scalar.ph:
; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; STRIDED-NEXT: [[BC_RESUME_VAL6:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[P]], [[ENTRY]] ], [ [[P]], [[VECTOR_MEMCHECK]] ]
; STRIDED-NEXT: [[BC_RESUME_VAL8:%.*]] = phi ptr [ [[IND_END7]], [[MIDDLE_BLOCK]] ], [ [[P2]], [[ENTRY]] ], [ [[P2]], [[VECTOR_MEMCHECK]] ]
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
; STRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[PTR:%.*]] = phi ptr [ [[BC_RESUME_VAL6]], [[SCALAR_PH]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[PTR2:%.*]] = phi ptr [ [[BC_RESUME_VAL8]], [[SCALAR_PH]] ], [ [[PTR2_NEXT:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[PTR]], align 4
; STRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; STRIDED-NEXT: store i32 [[Y0]], ptr [[PTR2]], align 4
; STRIDED-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[STRIDE]]
; STRIDED-NEXT: [[PTR2_NEXT]] = getelementptr inbounds i8, ptr [[PTR2]], i64 [[STRIDE]]
; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP21:![0-9]+]]
; STRIDED: exit:
; STRIDED-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]
  %ptr = phi ptr [%p, %entry], [%ptr.next, %loop]
  %ptr2 = phi ptr [%p2, %entry], [%ptr2.next, %loop]

  %x0 = load i32, ptr %ptr
  %y0 = add i32 %x0, 1
  store i32 %y0, ptr %ptr2

  %ptr.next = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %ptr2.next = getelementptr inbounds i8, ptr %ptr2, i64 %stride
  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}