1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt -mtriple=aarch64-none-linux-gnu -S -passes=loop-vectorize,instcombine -force-vector-width=4 -force-vector-interleave=1 -enable-interleaved-mem-accesses=true -mattr=+sve -scalable-vectorization=on -runtime-memory-check-threshold=24 < %s | FileCheck %s
4 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
6 ; Check vectorization on an interleaved load group of factor 2 and an interleaved
7 ; store group of factor 2.
; void test_array_load2_store2(int C, int D) {
;   for (int i = 0; i < 1024; i+=2) {
;     int A = AB[i];
;     int B = AB[i+1];
;     CD[i] = A + C;
;     CD[i+1] = B * D;
;   }
; }
21 @AB = common global [1024 x i32] zeroinitializer, align 4
22 @CD = common global [1024 x i32] zeroinitializer, align 4
24 define void @test_array_load2_store2(i32 %C, i32 %D) #1 {
25 ; CHECK-LABEL: @test_array_load2_store2(
27 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
29 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
30 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
31 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[C:%.*]], i64 0
32 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
33 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[D:%.*]], i64 0
34 ; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
35 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
37 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
38 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
39 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1024 x i32], ptr @AB, i64 0, i64 [[OFFSET_IDX]]
40 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP2]], align 4
41 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
42 ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
43 ; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
44 ; CHECK-NEXT: [[TMP6:%.*]] = add nsw <vscale x 4 x i32> [[TMP3]], [[BROADCAST_SPLAT]]
45 ; CHECK-NEXT: [[TMP7:%.*]] = mul nsw <vscale x 4 x i32> [[TMP4]], [[BROADCAST_SPLAT2]]
46 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1024 x i32], ptr @CD, i64 0, i64 [[OFFSET_IDX]]
47 ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[TMP6]], <vscale x 4 x i32> [[TMP7]])
48 ; CHECK-NEXT: store <vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP8]], align 4
49 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
50 ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512
51 ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
52 ; CHECK: middle.block:
53 ; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
55 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
57 ; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP3:![0-9]+]]
59 ; CHECK-NEXT: ret void
;
entry:
  br label %for.body

64 for.body: ; preds = %for.body, %entry
65 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
66 %arrayidx0 = getelementptr inbounds [1024 x i32], ptr @AB, i64 0, i64 %indvars.iv
67 %load1 = load i32, ptr %arrayidx0, align 4
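  ; %indvars.iv is always even here (starts at 0, steps by 2), so the "or disjoint" below computes i+1 for the odd element.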
68 %or = or disjoint i64 %indvars.iv, 1
69 %arrayidx1 = getelementptr inbounds [1024 x i32], ptr @AB, i64 0, i64 %or
70 %load2 = load i32, ptr %arrayidx1, align 4
71 %add = add nsw i32 %load1, %C
72 %mul = mul nsw i32 %load2, %D
73 %arrayidx2 = getelementptr inbounds [1024 x i32], ptr @CD, i64 0, i64 %indvars.iv
74 store i32 %add, ptr %arrayidx2, align 4
75 %arrayidx3 = getelementptr inbounds [1024 x i32], ptr @CD, i64 0, i64 %or
76 store i32 %mul, ptr %arrayidx3, align 4
77 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
78 %cmp = icmp slt i64 %indvars.iv.next, 1024
79 br i1 %cmp, label %for.body, label %for.end
81 for.end: ; preds = %for.body
  ret void
}
85 ; Check vectorization on an interleaved load group of factor 2 with narrower types and an interleaved
86 ; store group of factor 2.
; void test_array_load2_i16_store2(int C, int D) {
;   for (int i = 0; i < 1024; i+=2) {
;     CD[i] = AB_i16[i] + C;
;     CD[i+1] = AB_i16[i+1] * D;
;   }
; }
100 @AB_i16 = common global [1024 x i16] zeroinitializer, align 4
102 define void @test_array_load2_i16_store2(i32 %C, i32 %D) #1 {
103 ; CHECK-LABEL: @test_array_load2_i16_store2(
105 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
107 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
108 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
109 ; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
110 ; CHECK-NEXT: [[TMP3:%.*]] = shl <vscale x 4 x i64> [[TMP2]], splat (i64 1)
111 ; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP0]], 3
112 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP5]], i64 0
113 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
114 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[C:%.*]], i64 0
115 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
116 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[D:%.*]], i64 0
117 ; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT2]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
118 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
119 ; CHECK: vector.body:
120 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
121 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
122 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
123 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1024 x i16], ptr @AB_i16, i64 0, <vscale x 4 x i64> [[VEC_IND]]
124 ; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> [[TMP6]], i32 2, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i16> poison)
125 ; CHECK-NEXT: [[TMP7:%.*]] = or disjoint <vscale x 4 x i64> [[VEC_IND]], splat (i64 1)
126 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1024 x i16], ptr @AB_i16, i64 0, <vscale x 4 x i64> [[TMP7]]
127 ; CHECK-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> [[TMP8]], i32 2, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i16> poison)
128 ; CHECK-NEXT: [[TMP9:%.*]] = sext <vscale x 4 x i16> [[WIDE_MASKED_GATHER]] to <vscale x 4 x i32>
129 ; CHECK-NEXT: [[TMP10:%.*]] = add nsw <vscale x 4 x i32> [[BROADCAST_SPLAT]], [[TMP9]]
130 ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1024 x i32], ptr @CD, i64 0, i64 [[OFFSET_IDX]]
131 ; CHECK-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i16> [[WIDE_MASKED_GATHER1]] to <vscale x 4 x i32>
132 ; CHECK-NEXT: [[TMP12:%.*]] = mul nsw <vscale x 4 x i32> [[BROADCAST_SPLAT3]], [[TMP11]]
133 ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[TMP10]], <vscale x 4 x i32> [[TMP12]])
134 ; CHECK-NEXT: store <vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP14]], align 4
135 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
136 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
137 ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512
138 ; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
139 ; CHECK: middle.block:
140 ; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
142 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
144 ; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP5:![0-9]+]]
146 ; CHECK-NEXT: ret void
;
entry:
  br label %for.body

151 for.body: ; preds = %entry, %for.body
152 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
153 %arrayidx = getelementptr inbounds [1024 x i16], ptr @AB_i16, i64 0, i64 %indvars.iv
154 %0 = load i16, ptr %arrayidx, align 2
155 %1 = or disjoint i64 %indvars.iv, 1
156 %arrayidx2 = getelementptr inbounds [1024 x i16], ptr @AB_i16, i64 0, i64 %1
157 %2 = load i16, ptr %arrayidx2, align 2
158 %conv = sext i16 %0 to i32
159 %add3 = add nsw i32 %conv, %C
160 %arrayidx5 = getelementptr inbounds [1024 x i32], ptr @CD, i64 0, i64 %indvars.iv
161 store i32 %add3, ptr %arrayidx5, align 4
162 %conv6 = sext i16 %2 to i32
163 %mul = mul nsw i32 %conv6, %D
164 %arrayidx9 = getelementptr inbounds [1024 x i32], ptr @CD, i64 0, i64 %1
165 store i32 %mul, ptr %arrayidx9, align 4
166 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
167 %cmp = icmp ult i64 %indvars.iv, 1022
168 br i1 %cmp, label %for.body, label %for.end
170 for.end: ; preds = %for.body
  ret void
}
174 ; Check vectorization on an interleaved load group of factor 2 and an interleaved
175 ; store group of factor 2 with narrower types.
; void test_array_load2_store2_i16(int C, int D) {
;   for (int i = 0; i < 1024; i+=2) {
;     CD_i16[i] = AB[i] + C;
;     CD_i16[i+1] = AB[i+1] * D;
;   }
; }
189 @CD_i16 = dso_local local_unnamed_addr global [1024 x i16] zeroinitializer, align 2
191 define void @test_array_load2_store2_i16(i32 noundef %C, i32 noundef %D) #1 {
192 ; CHECK-LABEL: @test_array_load2_store2_i16(
194 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
196 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
197 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
198 ; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
199 ; CHECK-NEXT: [[TMP3:%.*]] = shl <vscale x 4 x i64> [[TMP2]], splat (i64 1)
200 ; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP0]], 3
201 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP5]], i64 0
202 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
203 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[C:%.*]], i64 0
204 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
205 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[D:%.*]], i64 0
206 ; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
207 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
208 ; CHECK: vector.body:
209 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
210 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
211 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
212 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1024 x i32], ptr @AB, i64 0, i64 [[OFFSET_IDX]]
213 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP6]], align 4
214 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
215 ; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
216 ; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
217 ; CHECK-NEXT: [[TMP9:%.*]] = or disjoint <vscale x 4 x i64> [[VEC_IND]], splat (i64 1)
218 ; CHECK-NEXT: [[TMP10:%.*]] = add nsw <vscale x 4 x i32> [[TMP7]], [[BROADCAST_SPLAT]]
219 ; CHECK-NEXT: [[TMP11:%.*]] = trunc <vscale x 4 x i32> [[TMP10]] to <vscale x 4 x i16>
220 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1024 x i16], ptr @CD_i16, i64 0, <vscale x 4 x i64> [[VEC_IND]]
221 ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> [[TMP11]], <vscale x 4 x ptr> [[TMP12]], i32 2, <vscale x 4 x i1> splat (i1 true))
222 ; CHECK-NEXT: [[TMP13:%.*]] = mul nsw <vscale x 4 x i32> [[TMP8]], [[BROADCAST_SPLAT2]]
223 ; CHECK-NEXT: [[TMP14:%.*]] = trunc <vscale x 4 x i32> [[TMP13]] to <vscale x 4 x i16>
224 ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1024 x i16], ptr @CD_i16, i64 0, <vscale x 4 x i64> [[TMP9]]
225 ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> [[TMP14]], <vscale x 4 x ptr> [[TMP15]], i32 2, <vscale x 4 x i1> splat (i1 true))
226 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
227 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
228 ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512
229 ; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
230 ; CHECK: middle.block:
231 ; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
233 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
235 ; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP7:![0-9]+]]
237 ; CHECK-NEXT: ret void
;
entry:
  br label %for.body

242 for.body: ; preds = %entry, %for.body
243 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
244 %arrayidx = getelementptr inbounds [1024 x i32], ptr @AB, i64 0, i64 %indvars.iv
245 %0 = load i32, ptr %arrayidx, align 4
246 %1 = or disjoint i64 %indvars.iv, 1
247 %arrayidx2 = getelementptr inbounds [1024 x i32], ptr @AB, i64 0, i64 %1
248 %2 = load i32, ptr %arrayidx2, align 4
249 %add3 = add nsw i32 %0, %C
250 %conv = trunc i32 %add3 to i16
251 %arrayidx5 = getelementptr inbounds [1024 x i16], ptr @CD_i16, i64 0, i64 %indvars.iv
252 store i16 %conv, ptr %arrayidx5, align 2
253 %mul = mul nsw i32 %2, %D
254 %conv6 = trunc i32 %mul to i16
255 %arrayidx9 = getelementptr inbounds [1024 x i16], ptr @CD_i16, i64 0, i64 %1
256 store i16 %conv6, ptr %arrayidx9, align 2
257 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
258 %cmp = icmp ult i64 %indvars.iv, 1022
259 br i1 %cmp, label %for.body, label %for.end
261 for.end: ; preds = %for.body
  ret void
}
265 ; Check vectorization on an interleaved load group of factor 6.
; There is no dedicated ldN/stN for an interleave factor of 6, so gathers are used instead.
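;
; A C-level sketch of the loop below, reconstructed from the scalar IR (the
; field names follow the GEP names used in the body):
;
; int test_struct_load6(struct ST6 *S) {
;   int r = 0;
;   for (int i = 0; i < 1024; i++)
;     r += S[i].x - S[i].y + S[i].z - S[i].w - S[i].a - S[i].b;
;   return r;
; }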
268 %struct.ST6 = type { i32, i32, i32, i32, i32, i32 }
270 define i32 @test_struct_load6(ptr %S) #1 {
271 ; CHECK-LABEL: @test_struct_load6(
273 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
275 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
276 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
277 ; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
278 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP1]], i64 0
279 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
280 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
281 ; CHECK: vector.body:
282 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
283 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP2]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
284 ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
285 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[S:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
286 ; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP5]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
287 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]], i32 1
288 ; CHECK-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP6]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
289 ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]], i32 2
290 ; CHECK-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP7]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
291 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]], i32 3
292 ; CHECK-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP8]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
293 ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]], i32 4
294 ; CHECK-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP9]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
295 ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[S]], <vscale x 4 x i64> [[VEC_IND]], i32 5
296 ; CHECK-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP10]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
297 ; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], [[VEC_PHI]]
298 ; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER2]]
299 ; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER1]], [[WIDE_MASKED_GATHER3]]
300 ; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[TMP13]], [[WIDE_MASKED_GATHER4]]
301 ; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[TMP14]], [[WIDE_MASKED_GATHER5]]
302 ; CHECK-NEXT: [[TMP16]] = sub <vscale x 4 x i32> [[TMP12]], [[TMP15]]
303 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
304 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
305 ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
306 ; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
307 ; CHECK: middle.block:
308 ; CHECK-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP16]])
309 ; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
311 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
313 ; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
314 ; CHECK: for.cond.cleanup:
315 ; CHECK-NEXT: [[SUB14_LCSSA:%.*]] = phi i32 [ poison, [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ]
316 ; CHECK-NEXT: ret i32 [[SUB14_LCSSA]]
;
entry:
  br label %for.body

321 for.body: ; preds = %entry, %for.body
322 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
323 %r.041 = phi i32 [ 0, %entry ], [ %sub14, %for.body ]
324 %x = getelementptr inbounds %struct.ST6, ptr %S, i64 %indvars.iv, i32 0
325 %0 = load i32, ptr %x, align 4
326 %y = getelementptr inbounds %struct.ST6, ptr %S, i64 %indvars.iv, i32 1
327 %1 = load i32, ptr %y, align 4
328 %z = getelementptr inbounds %struct.ST6, ptr %S, i64 %indvars.iv, i32 2
329 %2 = load i32, ptr %z, align 4
330 %w = getelementptr inbounds %struct.ST6, ptr %S, i64 %indvars.iv, i32 3
331 %3 = load i32, ptr %w, align 4
332 %a = getelementptr inbounds %struct.ST6, ptr %S, i64 %indvars.iv, i32 4
333 %4 = load i32, ptr %a, align 4
334 %b = getelementptr inbounds %struct.ST6, ptr %S, i64 %indvars.iv, i32 5
335 %5 = load i32, ptr %b, align 4
336 %.neg36 = add i32 %0, %r.041
337 %6 = add i32 %.neg36, %2
  %7 = add i32 %1, %3
  %8 = add i32 %7, %4
  %9 = add i32 %8, %5
341 %sub14 = sub i32 %6, %9
342 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
343 %exitcond.not = icmp eq i64 %indvars.iv.next, 1024
344 br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
346 for.cond.cleanup: ; preds = %for.body
347 %sub14.lcssa = phi i32 [ %sub14, %for.body ]
  ret i32 %sub14.lcssa
}
352 ; Check vectorization on a reverse interleaved load group of factor 2 and
353 ; a reverse interleaved store group of factor 2.
;
; struct ST2 {
;   int x;
;   int y;
; };
;
360 ; void test_reversed_load2_store2(struct ST2 *A, struct ST2 *B) {
361 ; for (int i = 1023; i >= 0; i--) {
362 ; int a = A[i].x + i; // interleaved load of index 0
363 ; int b = A[i].y - i; // interleaved load of index 1
364 ; B[i].x = a; // interleaved store of index 0
365 ; B[i].y = b; // interleaved store of index 1
;   }
; }
370 %struct.ST2 = type { i32, i32 }
372 define void @test_reversed_load2_store2(ptr noalias nocapture readonly %A, ptr noalias nocapture %B) #1 {
373 ; CHECK-LABEL: @test_reversed_load2_store2(
375 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
377 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
378 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
379 ; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
380 ; CHECK-NEXT: [[INDUCTION:%.*]] = sub <vscale x 4 x i32> splat (i32 1023), [[TMP2]]
381 ; CHECK-NEXT: [[TMP3:%.*]] = trunc nuw nsw i64 [[TMP1]] to i32
382 ; CHECK-NEXT: [[DOTNEG:%.*]] = sub nsw i32 0, [[TMP3]]
383 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[DOTNEG]], i64 0
384 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
385 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
386 ; CHECK: vector.body:
387 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
388 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
389 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
390 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[A:%.*]], i64 [[OFFSET_IDX]], i32 0
391 ; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32()
392 ; CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i32 [[TMP5]], 3
393 ; CHECK-NEXT: [[TMP7:%.*]] = sub nsw i32 2, [[TMP6]]
394 ; CHECK-NEXT: [[TMP8:%.*]] = sext i32 [[TMP7]] to i64
395 ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 [[TMP8]]
396 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP9]], align 4
397 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
398 ; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
399 ; CHECK-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP10]])
400 ; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
401 ; CHECK-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP11]])
402 ; CHECK-NEXT: [[TMP12:%.*]] = add nsw <vscale x 4 x i32> [[REVERSE]], [[VEC_IND]]
403 ; CHECK-NEXT: [[TMP13:%.*]] = sub nsw <vscale x 4 x i32> [[REVERSE1]], [[VEC_IND]]
404 ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_ST2]], ptr [[B:%.*]], i64 [[OFFSET_IDX]], i32 0
405 ; CHECK-NEXT: [[TMP15:%.*]] = call i32 @llvm.vscale.i32()
406 ; CHECK-NEXT: [[TMP16:%.*]] = shl nuw nsw i32 [[TMP15]], 3
407 ; CHECK-NEXT: [[TMP17:%.*]] = sub nsw i32 2, [[TMP16]]
408 ; CHECK-NEXT: [[TMP18:%.*]] = sext i32 [[TMP17]] to i64
409 ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP14]], i64 [[TMP18]]
410 ; CHECK-NEXT: [[REVERSE2:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP12]])
411 ; CHECK-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP13]])
412 ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[REVERSE2]], <vscale x 4 x i32> [[REVERSE3]])
413 ; CHECK-NEXT: store <vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP19]], align 4
414 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
415 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
416 ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
417 ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
418 ; CHECK: middle.block:
419 ; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
421 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
422 ; CHECK: for.cond.cleanup:
423 ; CHECK-NEXT: ret void
425 ; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_COND_CLEANUP]], !llvm.loop [[LOOP11:![0-9]+]]
;
entry:
  br label %for.body

430 for.cond.cleanup: ; preds = %for.body
  ret void

433 for.body: ; preds = %for.body, %entry
434 %indvars.iv = phi i64 [ 1023, %entry ], [ %indvars.iv.next, %for.body ]
435 %x = getelementptr inbounds %struct.ST2, ptr %A, i64 %indvars.iv, i32 0
436 %load1 = load i32, ptr %x, align 4
437 %trunc = trunc i64 %indvars.iv to i32
438 %add = add nsw i32 %load1, %trunc
439 %y = getelementptr inbounds %struct.ST2, ptr %A, i64 %indvars.iv, i32 1
440 %load2 = load i32, ptr %y, align 4
441 %sub = sub nsw i32 %load2, %trunc
442 %x5 = getelementptr inbounds %struct.ST2, ptr %B, i64 %indvars.iv, i32 0
443 store i32 %add, ptr %x5, align 4
444 %y8 = getelementptr inbounds %struct.ST2, ptr %B, i64 %indvars.iv, i32 1
445 store i32 %sub, ptr %y8, align 4
446 %indvars.iv.next = add nsw i64 %indvars.iv, -1
447 %cmp = icmp sgt i64 %indvars.iv, 0
448 br i1 %cmp, label %for.body, label %for.cond.cleanup
}
451 ; Check vectorization on an interleaved load group of factor 2 with 1 gap
452 ; (missing the load of odd elements). Because the vectorized loop would
453 ; speculatively access memory out-of-bounds, we must execute at least one
454 ; iteration of the scalar loop.
; void even_load_static_tc(int *A, int *B) {
;   for (unsigned i = 0; i < 1024; i+=2)
;     B[i/2] = A[i] * 2;
; }
462 define void @even_load_static_tc(ptr noalias nocapture readonly %A, ptr noalias nocapture %B) #1 {
463 ; CHECK-LABEL: @even_load_static_tc(
465 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
467 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
468 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
469 ; CHECK-NEXT: [[N_VEC:%.*]] = sub nuw nsw i64 512, [[TMP1]]
470 ; CHECK-NEXT: [[IND_END:%.*]] = shl nuw nsw i64 [[N_VEC]], 1
471 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
472 ; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 2
473 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
474 ; CHECK: vector.body:
475 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
476 ; CHECK-NEXT: [[DOTIDX:%.*]] = shl i64 [[INDEX]], 3
477 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[DOTIDX]]
478 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP4]], align 4
479 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
480 ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
481 ; CHECK-NEXT: [[TMP6:%.*]] = shl nsw <vscale x 4 x i32> [[TMP5]], splat (i32 1)
482 ; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[INDEX]], 9223372036854775804
483 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP7]]
484 ; CHECK-NEXT: store <vscale x 4 x i32> [[TMP6]], ptr [[TMP8]], align 4
485 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
486 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
487 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
488 ; CHECK: middle.block:
489 ; CHECK-NEXT: br label [[SCALAR_PH]]
491 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ poison, [[ENTRY:%.*]] ]
492 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
493 ; CHECK: for.cond.cleanup:
494 ; CHECK-NEXT: ret void
496 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
497 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
498 ; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
499 ; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[LOAD]], 1
500 ; CHECK-NEXT: [[LSHR:%.*]] = lshr exact i64 [[INDVARS_IV]], 1
501 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[LSHR]]
502 ; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 4
503 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 2
504 ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV]], 1022
505 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP13:![0-9]+]]
;
entry:
  br label %for.body

510 for.cond.cleanup: ; preds = %for.body
  ret void

513 for.body: ; preds = %for.body, %entry
514 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
515 %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
516 %load = load i32, ptr %arrayidx, align 4
517 %mul = shl nsw i32 %load, 1
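  ; %indvars.iv is always even, so the exact lshr below computes i/2 (the B[i/2] store index).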
518 %lshr = lshr exact i64 %indvars.iv, 1
519 %arrayidx2 = getelementptr inbounds i32, ptr %B, i64 %lshr
520 store i32 %mul, ptr %arrayidx2, align 4
521 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
522 %cmp = icmp ult i64 %indvars.iv.next, 1024
523 br i1 %cmp, label %for.body, label %for.cond.cleanup
}
526 ; Check vectorization on an interleaved load group of factor 2 with 1 gap
527 ; (missing the load of odd elements). Because the vectorized loop would
528 ; speculatively access memory out-of-bounds, we must execute at least one
529 ; iteration of the scalar loop.
; void even_load_dynamic_tc(int *A, int *B, unsigned N) {
;   for (unsigned i = 0; i < N; i+=2)
;     B[i/2] = A[i] * 2;
; }
537 define void @even_load_dynamic_tc(ptr noalias nocapture readonly %A, ptr noalias nocapture %B, i64 %N) #1 {
538 ; CHECK-LABEL: @even_load_dynamic_tc(
540 ; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 2)
541 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[UMAX]], -1
542 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 1
543 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
544 ; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 2
545 ; CHECK-NEXT: [[MIN_ITERS_CHECK_NOT_NOT:%.*]] = icmp samesign ult i64 [[TMP1]], [[TMP3]]
546 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_NOT_NOT]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
548 ; CHECK-NEXT: [[TMP4:%.*]] = add nuw i64 [[TMP1]], 1
549 ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
550 ; CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 2
551 ; CHECK-NEXT: [[TMP7:%.*]] = add nsw i64 [[TMP6]], -1
552 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = and i64 [[TMP4]], [[TMP7]]
553 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
554 ; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP6]], i64 [[N_MOD_VF]]
555 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP4]], [[TMP9]]
556 ; CHECK-NEXT: [[IND_END:%.*]] = shl i64 [[N_VEC]], 1
557 ; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
558 ; CHECK-NEXT: [[TMP11:%.*]] = shl nuw nsw i64 [[TMP10]], 2
559 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
560 ; CHECK: vector.body:
561 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
562 ; CHECK-NEXT: [[DOTIDX:%.*]] = shl i64 [[INDEX]], 3
563 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[DOTIDX]]
564 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP12]], align 4
565 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
566 ; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
567 ; CHECK-NEXT: [[TMP14:%.*]] = shl nsw <vscale x 4 x i32> [[TMP13]], splat (i32 1)
568 ; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[INDEX]], 9223372036854775804
569 ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP15]]
570 ; CHECK-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP16]], align 4
571 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
572 ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
573 ; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
574 ; CHECK: middle.block:
575 ; CHECK-NEXT: br label [[SCALAR_PH]]
577 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
578 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
579 ; CHECK: for.cond.cleanup:
580 ; CHECK-NEXT: ret void
582 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
583 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
584 ; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
585 ; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[LOAD]], 1
586 ; CHECK-NEXT: [[LSHR:%.*]] = lshr exact i64 [[INDVARS_IV]], 1
587 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[LSHR]]
588 ; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 4
589 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 2
590 ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], [[N]]
591 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP15:![0-9]+]]
;
entry:
  br label %for.body

596 for.cond.cleanup: ; preds = %for.body
  ret void

599 for.body: ; preds = %for.body, %entry
600 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
601 %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
602 %load = load i32, ptr %arrayidx, align 4
603 %mul = shl nsw i32 %load, 1
604 %lshr = lshr exact i64 %indvars.iv, 1
605 %arrayidx2 = getelementptr inbounds i32, ptr %B, i64 %lshr
606 store i32 %mul, ptr %arrayidx2, align 4
607 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
608 %cmp = icmp ult i64 %indvars.iv.next, %N
609 br i1 %cmp, label %for.body, label %for.cond.cleanup
}
612 ; Check vectorization on a reverse interleaved load group of factor 2 with 1
613 ; gap and a reverse interleaved store group of factor 2. The interleaved load
614 ; group should be removed since it has a gap and is reverse.
; void load_gap_reverse(struct pair *P1, struct pair *P2, int X) {
;   for (int i = 1023; i >= 0; i--) {
;     int a = X + i;
;     int b = P2[i].y - i;
;     P1[i].x = a;
;     P2[i].y = b;
;   }
; }
630 %pair = type { i64, i64 }
define void @load_gap_reverse(ptr noalias nocapture %P1, ptr noalias nocapture %P2, i64 %X) #1 {
632 ; CHECK-LABEL: @load_gap_reverse(
634 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
636 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
637 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
638 ; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
639 ; CHECK-NEXT: [[INDUCTION:%.*]] = sub <vscale x 4 x i64> splat (i64 1023), [[TMP2]]
640 ; CHECK-NEXT: [[DOTNEG:%.*]] = sub nsw i64 0, [[TMP1]]
641 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[DOTNEG]], i64 0
642 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
643 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[X:%.*]], i64 0
644 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
645 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
646 ; CHECK: vector.body:
647 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
648 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
649 ; CHECK-NEXT: [[TMP4:%.*]] = add nsw <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[VEC_IND]]
650 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[PAIR:%.*]], ptr [[P1:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
651 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[PAIR]], ptr [[P2:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 1
652 ; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> [[TMP6]], i32 8, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i64> poison)
653 ; CHECK-NEXT: [[TMP7:%.*]] = sub nsw <vscale x 4 x i64> [[WIDE_MASKED_GATHER]], [[VEC_IND]]
654 ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i64.nxv4p0(<vscale x 4 x i64> [[TMP4]], <vscale x 4 x ptr> [[TMP5]], i32 8, <vscale x 4 x i1> splat (i1 true))
655 ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i64.nxv4p0(<vscale x 4 x i64> [[TMP7]], <vscale x 4 x ptr> [[TMP6]], i32 8, <vscale x 4 x i1> splat (i1 true))
656 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
657 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
658 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
659 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
660 ; CHECK: middle.block:
661 ; CHECK-NEXT: br i1 true, label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
663 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
665 ; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_EXIT]], !llvm.loop [[LOOP17:![0-9]+]]
667 ; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
673 %i = phi i64 [ 1023, %entry ], [ %i.next, %for.body ]
674 %0 = add nsw i64 %X, %i
675 %1 = getelementptr inbounds %pair, ptr %P1, i64 %i, i32 0
676 %2 = getelementptr inbounds %pair, ptr %P2, i64 %i, i32 1
677 %3 = load i64, ptr %2, align 8
678 %4 = sub nsw i64 %3, %i
679 store i64 %0, ptr %1, align 8
680 store i64 %4, ptr %2, align 8
681 %i.next = add nsw i64 %i, -1
682 %cond = icmp sgt i64 %i, 0
683 br i1 %cond, label %for.body, label %for.exit

for.exit:                                         ; preds = %for.body
  ret void
}
; Check vectorization on interleaved access groups identified from mixed loads/stores.
691 ; void mixed_load2_store2(int *A, int *B) {
692 ; for (unsigned i = 0; i < 1024; i+=2) {
693 ; B[i] = A[i] * A[i+1];
694 ; B[i+1] = A[i] + A[i+1];
;   }
; }
699 define void @mixed_load2_store2(ptr noalias nocapture readonly %A, ptr noalias nocapture %B) #1 {
700 ; CHECK-LABEL: @mixed_load2_store2(
702 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
704 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
705 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
706 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
707 ; CHECK: vector.body:
708 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
709 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
710 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
711 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP2]], align 4
712 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
713 ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
714 ; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
715 ; CHECK-NEXT: [[TMP6:%.*]] = mul nsw <vscale x 4 x i32> [[TMP4]], [[TMP3]]
716 ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[OFFSET_IDX]]
717 ; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
718 ; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 0
719 ; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 1
720 ; CHECK-NEXT: [[TMP9:%.*]] = add nsw <vscale x 4 x i32> [[TMP8]], [[TMP7]]
721 ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[TMP6]], <vscale x 4 x i32> [[TMP9]])
722 ; CHECK-NEXT: store <vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP10]], align 4
723 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
724 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512
725 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
726 ; CHECK: middle.block:
727 ; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
729 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
730 ; CHECK: for.cond.cleanup:
731 ; CHECK-NEXT: ret void
733 ; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_COND_CLEANUP]], !llvm.loop [[LOOP19:![0-9]+]]
;
entry:
  br label %for.body

738 for.cond.cleanup: ; preds = %for.body
  ret void

741 for.body: ; preds = %for.body, %entry
742 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
743 %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
744 %load1 = load i32, ptr %arrayidx, align 4
745 %or = or disjoint i64 %indvars.iv, 1
746 %arrayidx2 = getelementptr inbounds i32, ptr %A, i64 %or
747 %load2 = load i32, ptr %arrayidx2, align 4
748 %mul = mul nsw i32 %load2, %load1
749 %arrayidx4 = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
750 store i32 %mul, ptr %arrayidx4, align 4
751 %load3 = load i32, ptr %arrayidx, align 4
752 %load4 = load i32, ptr %arrayidx2, align 4
753 %add10 = add nsw i32 %load4, %load3
754 %arrayidx13 = getelementptr inbounds i32, ptr %B, i64 %or
755 store i32 %add10, ptr %arrayidx13, align 4
756 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
757 %cmp = icmp ult i64 %indvars.iv.next, 1024
758 br i1 %cmp, label %for.body, label %for.cond.cleanup
}
; Check vectorization on interleaved access groups with members having different types.
; void int_float_struct(struct IntFloat *A) {
;   int SumA;
;   float SumB;
;   for (unsigned i = 0; i < 1024; i++) {
;     SumA += A[i].a;
;     SumB += A[i].b;
;   }
;   SA = SumA;
;   SB = SumB;
; }
785 %struct.IntFloat = type { i32, float }
787 @SA = common global i32 0, align 4
788 @SB = common global float 0.000000e+00, align 4
790 define void @int_float_struct(ptr nocapture readonly %p) #0 {
791 ; CHECK-LABEL: @int_float_struct(
793 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
795 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
796 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
797 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
798 ; CHECK: vector.body:
799 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
800 ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> zeroinitializer, float undef, i32 0), [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
801 ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 undef, i32 0), [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ]
802 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_INTFLOAT:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
803 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP2]], align 4
804 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
805 ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
806 ; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
807 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <vscale x 4 x i32> [[TMP4]] to <vscale x 4 x float>
808 ; CHECK-NEXT: [[TMP6]] = add <vscale x 4 x i32> [[TMP3]], [[VEC_PHI1]]
809 ; CHECK-NEXT: [[TMP7]] = fadd fast <vscale x 4 x float> [[VEC_PHI]], [[TMP5]]
810 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
811 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
812 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
813 ; CHECK: middle.block:
814 ; CHECK-NEXT: [[TMP9:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP7]])
815 ; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP6]])
816 ; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
818 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
819 ; CHECK: for.cond.cleanup:
820 ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ poison, [[FOR_BODY]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ]
821 ; CHECK-NEXT: [[ADD3_LCSSA:%.*]] = phi float [ poison, [[FOR_BODY]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
822 ; CHECK-NEXT: store i32 [[ADD_LCSSA]], ptr @SA, align 4
823 ; CHECK-NEXT: store float [[ADD3_LCSSA]], ptr @SB, align 4
824 ; CHECK-NEXT: ret void
826 ; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
;
entry:
  br label %for.body

831 for.cond.cleanup: ; preds = %for.body
832 store i32 %add, ptr @SA, align 4
833 store float %add3, ptr @SB, align 4
  ret void

836 for.body: ; preds = %for.body, %entry
837 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
838 %SumB.014 = phi float [ undef, %entry ], [ %add3, %for.body ]
839 %SumA.013 = phi i32 [ undef, %entry ], [ %add, %for.body ]
840 %a = getelementptr inbounds %struct.IntFloat, ptr %p, i64 %indvars.iv, i32 0
841 %load1 = load i32, ptr %a, align 4
842 %add = add nsw i32 %load1, %SumA.013
843 %b = getelementptr inbounds %struct.IntFloat, ptr %p, i64 %indvars.iv, i32 1
844 %load2 = load float, ptr %b, align 4
845 %add3 = fadd fast float %SumB.014, %load2
846 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
847 %exitcond = icmp eq i64 %indvars.iv.next, 1024
848 br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
851 ; Check vectorization of interleaved access groups in the presence of
852 ; dependences (PR27626). The following tests check that we don't reorder
853 ; dependent loads and stores when generating code for interleaved access
854 ; groups. Stores should be scalarized because the required code motion would
; break dependences, and the remaining interleaved load groups should have gaps.
858 ; PR27626_0: Ensure a strided store is not moved after a dependent (zero
859 ; distance) strided load.
; void PR27626_0(struct pair *p, int z, int n) {
;   for (int i = 0; i < n; i++) {
;     p[i].x = z;
;     p[i].y = p[i].x;
;   }
; }
868 %pair.i32 = type { i32, i32 }
869 define void @PR27626_0(ptr %p, i32 %z, i64 %n) #1 {
870 ; CHECK-LABEL: @PR27626_0(
872 ; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 1)
873 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
874 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
875 ; CHECK-NEXT: [[MIN_ITERS_CHECK_NOT:%.*]] = icmp samesign ugt i64 [[SMAX]], [[TMP1]]
876 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_NOT]], label [[VECTOR_PH:%.*]], label [[SCALAR_PH:%.*]]
878 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
879 ; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 2
880 ; CHECK-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP3]], -1
881 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = and i64 [[SMAX]], [[TMP4]]
882 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
883 ; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP3]], i64 [[N_MOD_VF]]
884 ; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]]
885 ; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
886 ; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
887 ; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
888 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0
889 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
890 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[Z:%.*]], i64 0
891 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
892 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
893 ; CHECK: vector.body:
894 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
895 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
896 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
897 ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]], i32 1
898 ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], <vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true))
899 ; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 4 x ptr> [[TMP12]], i64 0
900 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP14]], align 4
901 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
902 ; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
903 ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP15]], <vscale x 4 x ptr> [[TMP13]], i32 4, <vscale x 4 x i1> splat (i1 true))
904 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
905 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
906 ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
907 ; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
908 ; CHECK: middle.block:
909 ; CHECK-NEXT: br label [[SCALAR_PH]]
911 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
912 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
914 ; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
915 ; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 0
916 ; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
917 ; CHECK-NEXT: store i32 [[Z]], ptr [[P_I_X]], align 4
918 ; CHECK-NEXT: store i32 [[Z]], ptr [[P_I_Y]], align 4
919 ; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
920 ; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
921 ; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END:%.*]], !llvm.loop [[LOOP23:![0-9]+]]
923 ; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
929 %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
930 %p_i.x = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 0
931 %p_i.y = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 1
932 store i32 %z, ptr %p_i.x, align 4
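  ; The load below reads the value just stored (a zero-distance dependence), so the strided store must not sink past it.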
933 %0 = load i32, ptr %p_i.x, align 4
934 store i32 %0, ptr %p_i.y, align 4
935 %i.next = add nuw nsw i64 %i, 1
936 %cond = icmp slt i64 %i.next, %n
937 br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %for.body
  ret void
}
943 ; PR27626_1: Ensure a strided load is not moved before a dependent (zero
944 ; distance) strided store.
; int PR27626_1(struct pair *p, int n) {
;   int s = 0;
;   for (int i = 0; i < n; i++) {
;     p[i].y = p[i].x;
;     s += p[i].y;
;   }
;   return s;
; }
954 define i32 @PR27626_1(ptr %p, i64 %n) #1 {
955 ; CHECK-LABEL: @PR27626_1(
957 ; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 1)
958 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
959 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
960 ; CHECK-NEXT: [[MIN_ITERS_CHECK_NOT:%.*]] = icmp samesign ugt i64 [[SMAX]], [[TMP1]]
961 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_NOT]], label [[VECTOR_PH:%.*]], label [[SCALAR_PH:%.*]]
963 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
964 ; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 2
965 ; CHECK-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP3]], -1
966 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = and i64 [[SMAX]], [[TMP4]]
967 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
968 ; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP3]], i64 [[N_MOD_VF]]
969 ; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]]
970 ; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
971 ; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
972 ; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
973 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0
974 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
975 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
976 ; CHECK: vector.body:
977 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
978 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
979 ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
980 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
981 ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]], i32 1
982 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP12]], align 4
983 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
984 ; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
985 ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP14]], <vscale x 4 x ptr> [[TMP13]], i32 4, <vscale x 4 x i1> splat (i1 true))
986 ; CHECK-NEXT: [[TMP15:%.*]] = extractelement <vscale x 4 x ptr> [[TMP13]], i64 0
987 ; CHECK-NEXT: [[WIDE_VEC1:%.*]] = load <vscale x 8 x i32>, ptr [[TMP15]], align 4
988 ; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC1]])
989 ; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 0
990 ; CHECK-NEXT: [[TMP17]] = add <vscale x 4 x i32> [[TMP16]], [[VEC_PHI]]
991 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
992 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
993 ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
994 ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
995 ; CHECK: middle.block:
996 ; CHECK-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP17]])
997 ; CHECK-NEXT: br label [[SCALAR_PH]]
999 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
1000 ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP19]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
1001 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1003 ; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
1004 ; CHECK-NEXT: [[S:%.*]] = phi i32 [ [[TMP21:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
1005 ; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 0
1006 ; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
1007 ; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[P_I_X]], align 4
1008 ; CHECK-NEXT: store i32 [[TMP20]], ptr [[P_I_Y]], align 4
1009 ; CHECK-NEXT: [[TMP21]] = add nsw i32 [[TMP20]], [[S]]
1010 ; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
1011 ; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
1012 ; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END:%.*]], !llvm.loop [[LOOP25:![0-9]+]]
1014 ; CHECK-NEXT: ret i32 [[TMP21]]
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
1020 %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
1021 %s = phi i32 [ %2, %for.body ], [ 0, %entry ]
1022 %p_i.x = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 0
1023 %p_i.y = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 1
1024 %0 = load i32, ptr %p_i.x, align 4
1025 store i32 %0, ptr %p_i.y, align 4
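  ; This load reads the value just stored (a zero-distance dependence), so the strided load must not hoist above the store.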
1026 %1 = load i32, ptr %p_i.y, align 4
1027 %2 = add nsw i32 %1, %s
1028 %i.next = add nuw nsw i64 %i, 1
1029 %cond = icmp slt i64 %i.next, %n
1030 br i1 %cond, label %for.body, label %for.end
1033 %3 = phi i32 [ %2, %for.body ]
; PR27626_2: Ensure a strided store is not moved after a dependent (negative
; distance) strided load.
;
; void PR27626_2(struct pair *p, int z, int n) {
;   for (int i = 0; i < n; i++) {
;     p[i].x = z;
;     p[i].y = p[i - 1].x;
;   }
; }
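;
; A worked trace with assumed values (illustration only, not part of the
; test): let z == 7. Then
;   i == 1: p[1].x = 7;  p[1].y = p[0].x;   // must observe the 7 from i == 0
;   i == 2: p[2].x = 7;  p[2].y = p[1].x;   // must observe the 7 from i == 1
; Lane k + 1 of a vector iteration reads what lane k wrote, so the scatter of
; z has to be issued before the wide load of p[i - 1].x in the CHECK lines
; below.
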
define void @PR27626_2(ptr %p, i64 %n, i32 %z) #1 {
; CHECK-LABEL: @PR27626_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 1)
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK_NOT:%.*]] = icmp samesign ugt i64 [[SMAX]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_NOT]], label [[VECTOR_PH:%.*]], label [[SCALAR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 2
; CHECK-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP3]], -1
; CHECK-NEXT: [[N_MOD_VF:%.*]] = and i64 [[SMAX]], [[TMP4]]
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[Z:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 -8
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[VEC_IND]], i32 1
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], <vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP13]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP15]], <vscale x 4 x ptr> [[TMP14]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 0
; CHECK-NEXT: [[P_I_MINUS_1_X:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 -8
; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
; CHECK-NEXT: store i32 [[Z]], ptr [[P_I_X]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[P_I_MINUS_1_X]], align 4
; CHECK-NEXT: store i32 [[TMP17]], ptr [[P_I_Y]], align 4
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END:%.*]], !llvm.loop [[LOOP27:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %i_minus_1 = add nuw nsw i64 %i, -1
  %p_i.x = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 0
  %p_i_minus_1.x = getelementptr inbounds %pair.i32, ptr %p, i64 %i_minus_1, i32 0
  %p_i.y = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 1
  store i32 %z, ptr %p_i.x, align 4
  %0 = load i32, ptr %p_i_minus_1.x, align 4
  store i32 %0, ptr %p_i.y, align 4
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %for.body
  ret void
}

; PR27626_3: Ensure a strided load is not moved before a dependent (negative
; distance) strided store.
;
; int PR27626_3(struct pair *p, int z, int n) {
;   int s = 0;
;   for (int i = 0; i < n; i++) {
;     p[i + 1].y = p[i].x;
;     s += p[i].y;
;   }
;   return s;
; }
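;
; A worked trace (illustration only): iteration i stores p[i + 1].y, and
; iteration i + 1 then reads p[i + 1].y into the sum:
;   i == 0: p[1].y = p[0].x;  s += p[0].y;
;   i == 1: p[2].y = p[1].x;  s += p[1].y;  // must observe the i == 0 store
; Hoisting the strided load of p[i].y above the scatter to p[i + 1].y would
; read stale memory, so the CHECK lines below keep the scatter first.
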
define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) #1 {
; CHECK-LABEL: @PR27626_3(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 1)
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK_NOT:%.*]] = icmp samesign ugt i64 [[SMAX]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_NOT]], label [[VECTOR_PH:%.*]], label [[SCALAR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 2
; CHECK-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP3]], -1
; CHECK-NEXT: [[N_MOD_VF:%.*]] = and i64 [[SMAX]], [[TMP4]]
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP12:%.*]] = add nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 1)
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[PAIR_I32:%.*]], ptr [[P:%.*]], i64 [[INDEX]], i32 0
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[INDEX]], i32 1
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], <vscale x 4 x i64> [[TMP12]], i32 1
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP13]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP16]], <vscale x 4 x ptr> [[TMP15]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[WIDE_VEC1:%.*]] = load <vscale x 8 x i32>, ptr [[TMP14]], align 4
; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC1]])
; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 0
; CHECK-NEXT: [[TMP18]] = add <vscale x 4 x i32> [[TMP17]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP20:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP18]])
; CHECK-NEXT: br label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP20]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S:%.*]] = phi i32 [ [[TMP23:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I_PLUS_1:%.*]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[P_I_X:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 0
; CHECK-NEXT: [[P_I_Y:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I]], i32 1
; CHECK-NEXT: [[P_I_PLUS_1_Y:%.*]] = getelementptr inbounds [[PAIR_I32]], ptr [[P]], i64 [[I_PLUS_1]], i32 1
; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[P_I_X]], align 4
; CHECK-NEXT: store i32 [[TMP21]], ptr [[P_I_PLUS_1_Y]], align 4
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[P_I_Y]], align 4
; CHECK-NEXT: [[TMP23]] = add nsw i32 [[TMP22]], [[S]]
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END:%.*]], !llvm.loop [[LOOP29:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 [[TMP23]]
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %s = phi i32 [ %2, %for.body ], [ 0, %entry ]
  %i_plus_1 = add nuw nsw i64 %i, 1
  %p_i.x = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 0
  %p_i.y = getelementptr inbounds %pair.i32, ptr %p, i64 %i, i32 1
  %p_i_plus_1.y = getelementptr inbounds %pair.i32, ptr %p, i64 %i_plus_1, i32 1
  %0 = load i32, ptr %p_i.x, align 4
  store i32 %0, ptr %p_i_plus_1.y, align 4
  %1 = load i32, ptr %p_i.y, align 4
  %2 = add nsw i32 %1, %s
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %for.body
  %3 = phi i32 [ %2, %for.body ]
  ret i32 %3
}

; PR27626_4: Ensure we form an interleaved group for strided stores in the
; presence of a write-after-write dependence. We create a group for
; (2) and (3) while excluding (1).
;
; void PR27626_4(int *a, int x, int y, int z, int n) {
;   for (int i = 0; i < n; i += 2) {
;     a[i] = x;      // (1)
;     a[i] = y;      // (2)
;     a[i + 1] = z;  // (3)
;   }
; }
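;
; A worked trace (illustration only): every iteration executes
;   a[i] = x;  a[i] = y;  a[i + 1] = z;
; so (2) kills (1) and the final state is a[i] == y, a[i + 1] == z. The
; vectorizer therefore excludes (1) from the interleaved group, emitting it
; as a plain scatter followed by one interleaved store of the <y, z> pair,
; as the CHECK lines below show.
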
define void @PR27626_4(ptr %a, i32 %x, i32 %y, i32 %z, i64 %n) #1 {
; CHECK-LABEL: @PR27626_4(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 2)
; CHECK-NEXT: [[TMP0:%.*]] = add nsw i64 [[SMAX]], -1
; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 1
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp samesign ult i64 [[TMP2]], [[TMP4]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP5]], -4
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP2]], [[DOTNEG]]
; CHECK-NEXT: [[IND_END:%.*]] = shl nuw i64 [[N_VEC]], 1
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP7:%.*]] = shl nuw nsw i64 [[TMP6]], 2
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP9:%.*]] = shl <vscale x 4 x i64> [[TMP8]], splat (i64 1)
; CHECK-NEXT: [[TMP11:%.*]] = shl nuw nsw i64 [[TMP6]], 3
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP11]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[Y:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[Z:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP9]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], <vscale x 4 x ptr> [[TMP13]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[P:%.*]] = extractelement <vscale x 4 x ptr> [[TMP13]], i64 0
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[BROADCAST_SPLAT2]], <vscale x 4 x i32> [[BROADCAST_SPLAT4]])
; CHECK-NEXT: store <vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[P]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I_PLUS_1:%.*]] = or disjoint i64 [[I]], 1
; CHECK-NEXT: [[A_I:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]]
; CHECK-NEXT: [[A_I_PLUS_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I_PLUS_1]]
; CHECK-NEXT: store i32 [[Y]], ptr [[A_I]], align 4
; CHECK-NEXT: store i32 [[Z]], ptr [[A_I_PLUS_1]], align 4
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 2
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP31:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %i_plus_1 = add i64 %i, 1
  %a_i = getelementptr inbounds i32, ptr %a, i64 %i
  %a_i_plus_1 = getelementptr inbounds i32, ptr %a, i64 %i_plus_1
  store i32 %x, ptr %a_i, align 4
  store i32 %y, ptr %a_i, align 4
  store i32 %z, ptr %a_i_plus_1, align 4
  %i.next = add nuw nsw i64 %i, 2
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %for.body
  ret void
}

; PR27626_5: Ensure we do not form an interleaved group for strided stores in
; the presence of a write-after-write dependence.
;
; void PR27626_5(int *a, int x, int y, int z, int n) {
;   for (int i = 3; i < n; i += 2) {
;     a[i - 1] = x;
;     a[i - 3] = y;
;     a[i] = z;
;   }
; }
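;
; A worked trace (illustration only): with the stride of 2,
;   i == 3: a[2] = x;  a[0] = y;  a[3] = z;
;   i == 5: a[4] = x;  a[2] = y;  a[5] = z;  // a[2] rewritten: x then y
; The a[i - 1] store of one iteration and the a[i - 3] store two iterations
; later hit the same element, a write-after-write dependence at distance 2,
; so no interleaved group may be formed; the CHECK lines below expect three
; independent scatters issued in source order instead.
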
define void @PR27626_5(ptr %a, i32 %x, i32 %y, i32 %z, i64 %n) #1 {
; CHECK-LABEL: @PR27626_5(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 5)
; CHECK-NEXT: [[TMP0:%.*]] = add nsw i64 [[SMAX]], -4
; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 1
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp samesign ult i64 [[TMP2]], [[TMP4]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP5]], -4
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP2]], [[DOTNEG]]
; CHECK-NEXT: [[TMP6:%.*]] = shl nuw i64 [[N_VEC]], 1
; CHECK-NEXT: [[IND_END:%.*]] = or disjoint i64 [[TMP6]], 3
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP10:%.*]] = shl <vscale x 4 x i64> [[TMP9]], splat (i64 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> [[TMP10]], splat (i64 3)
; CHECK-NEXT: [[TMP12:%.*]] = shl nuw nsw i64 [[TMP7]], 3
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP12]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[Y:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[Z:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i64> [[VEC_IND]], splat (i64 -1)
; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i64> [[VEC_IND]], splat (i64 -3)
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP13]]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP14]]
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], <vscale x 4 x ptr> [[TMP16]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT2]], <vscale x 4 x ptr> [[TMP17]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT4]], <vscale x 4 x ptr> [[TMP15]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[A_I:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]]
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[A]], i64 [[I]]
; CHECK-NEXT: [[A_I_MINUS_1:%.*]] = getelementptr i8, ptr [[TMP19]], i64 -4
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[A]], i64 [[I]]
; CHECK-NEXT: [[A_I_MINUS_3:%.*]] = getelementptr i8, ptr [[TMP20]], i64 -12
; CHECK-NEXT: store i32 [[X]], ptr [[A_I_MINUS_1]], align 4
; CHECK-NEXT: store i32 [[Y]], ptr [[A_I_MINUS_3]], align 4
; CHECK-NEXT: store i32 [[Z]], ptr [[A_I]], align 4
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 2
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP33:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i = phi i64 [ %i.next, %for.body ], [ 3, %entry ]
  %i_minus_1 = sub i64 %i, 1
  %i_minus_3 = sub i64 %i_minus_1, 2
  %a_i = getelementptr inbounds i32, ptr %a, i64 %i
  %a_i_minus_1 = getelementptr inbounds i32, ptr %a, i64 %i_minus_1
  %a_i_minus_3 = getelementptr inbounds i32, ptr %a, i64 %i_minus_3
  store i32 %x, ptr %a_i_minus_1, align 4
  store i32 %y, ptr %a_i_minus_3, align 4
  store i32 %z, ptr %a_i, align 4
  %i.next = add nuw nsw i64 %i, 2
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %for.body
  ret void
}

; PR34743: Ensure that a cast which needs to sink after a load that belongs to
; an interleaved group indeed gets sunk.
;
; void PR34743(short *a, int *b, int n) {
;   for (int i = 0, iv = 0; iv < n; i++, iv += 2) {
;     b[i] = a[iv] * a[iv+1] * a[iv+2];
;   }
; }
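;
; A worked trace (illustration only): with iv advancing by 2, a[iv] in one
; iteration is exactly the a[iv + 2] loaded by the previous iteration, so the
; scalar loop carries that value in a first-order recurrence and only
; re-loads a[iv + 1] and a[iv + 2]. The sign extension of the recurring value
; must be sunk below the gather that defines it; the vector.splice sequence
; in the CHECK lines below exercises exactly that.
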
define void @PR34743(ptr %a, ptr %b, i64 %n) #1 {
; CHECK-LABEL: @PR34743(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTPRE:%.*]] = load i16, ptr [[A:%.*]], align 2
; CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[N:%.*]], 1
; CHECK-NEXT: [[TMP1:%.*]] = add nuw i64 [[TMP0]], 1
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], [[TMP3]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 1
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP5]]
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[TMP6]], i64 4
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr nuw i8, ptr [[A]], i64 2
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP5]]
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[TMP7]], i64 6
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP1]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP8]], -4
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP1]], [[DOTNEG]]
; CHECK-NEXT: [[IND_END:%.*]] = shl i64 [[N_VEC]], 1
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = shl nuw nsw i64 [[TMP9]], 2
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP12:%.*]] = shl nuw nsw i32 [[TMP11]], 2
; CHECK-NEXT: [[TMP13:%.*]] = add nsw i32 [[TMP12]], -1
; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 4 x i16> poison, i16 [[DOTPRE]], i32 [[TMP13]]
; CHECK-NEXT: [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP15:%.*]] = shl <vscale x 4 x i64> [[TMP14]], splat (i64 1)
; CHECK-NEXT: [[TMP17:%.*]] = shl nuw nsw i64 [[TMP9]], 3
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP17]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i16> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_MASKED_GATHER4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP15]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP18:%.*]] = add nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 1)
; CHECK-NEXT: [[TMP19:%.*]] = add nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 2)
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i16, ptr [[A]], <vscale x 4 x i64> [[TMP18]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> [[TMP20]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i16> poison), !alias.scope [[META34:![0-9]+]]
; CHECK-NEXT: [[TMP21:%.*]] = sext <vscale x 4 x i16> [[WIDE_MASKED_GATHER]] to <vscale x 4 x i32>
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i16, ptr [[A]], <vscale x 4 x i64> [[TMP19]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER4]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> [[TMP22]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i16> poison), !alias.scope [[META34]]
; CHECK-NEXT: [[TMP23:%.*]] = call <vscale x 4 x i16> @llvm.vector.splice.nxv4i16(<vscale x 4 x i16> [[VECTOR_RECUR]], <vscale x 4 x i16> [[WIDE_MASKED_GATHER4]], i32 -1)
; CHECK-NEXT: [[TMP24:%.*]] = sext <vscale x 4 x i16> [[TMP23]] to <vscale x 4 x i32>
; CHECK-NEXT: [[TMP25:%.*]] = sext <vscale x 4 x i16> [[WIDE_MASKED_GATHER4]] to <vscale x 4 x i32>
; CHECK-NEXT: [[TMP26:%.*]] = mul nsw <vscale x 4 x i32> [[TMP24]], [[TMP21]]
; CHECK-NEXT: [[TMP27:%.*]] = mul nsw <vscale x 4 x i32> [[TMP26]], [[TMP25]]
; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x i32> [[TMP27]], ptr [[TMP28]], align 4, !alias.scope [[META37:![0-9]+]], !noalias [[META34]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP30:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP31:%.*]] = shl nuw nsw i32 [[TMP30]], 2
; CHECK-NEXT: [[TMP32:%.*]] = add nsw i32 [[TMP31]], -1
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <vscale x 4 x i16> [[WIDE_MASKED_GATHER4]], i32 [[TMP32]]
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: [[BC_RESUME_VAL3:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ], [ [[DOTPRE]], [[ENTRY]] ], [ [[DOTPRE]], [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[TMP33:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[LOAD2:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV2:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ], [ [[I1:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP33]] to i32
; CHECK-NEXT: [[I1]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[IV1:%.*]] = or disjoint i64 [[IV]], 1
; CHECK-NEXT: [[IV2]] = add nuw nsw i64 [[IV]], 2
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 [[IV1]]
; CHECK-NEXT: [[LOAD1:%.*]] = load i16, ptr [[GEP1]], align 4
; CHECK-NEXT: [[CONV1:%.*]] = sext i16 [[LOAD1]] to i32
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 [[IV2]]
; CHECK-NEXT: [[LOAD2]] = load i16, ptr [[GEP2]], align 4
; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[LOAD2]] to i32
; CHECK-NEXT: [[MUL01:%.*]] = mul nsw i32 [[CONV]], [[CONV1]]
; CHECK-NEXT: [[MUL012:%.*]] = mul nsw i32 [[MUL01]], [[CONV2]]
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]]
; CHECK-NEXT: store i32 [[MUL012]], ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[END]], label [[LOOP]], !llvm.loop [[LOOP40:![0-9]+]]
; CHECK: end:
; CHECK-NEXT: ret void
;
entry:
  %.pre = load i16, ptr %a
  br label %loop

loop:                                             ; preds = %loop, %entry
  %0 = phi i16 [ %.pre, %entry ], [ %load2, %loop ]
  %iv = phi i64 [ 0, %entry ], [ %iv2, %loop ]
  %i = phi i64 [ 0, %entry ], [ %i1, %loop ]
  %conv = sext i16 %0 to i32
  %i1 = add nuw nsw i64 %i, 1
  %iv1 = add nuw nsw i64 %iv, 1
  %iv2 = add nuw nsw i64 %iv, 2
  %gep1 = getelementptr inbounds i16, ptr %a, i64 %iv1
  %load1 = load i16, ptr %gep1, align 4
  %conv1 = sext i16 %load1 to i32
  %gep2 = getelementptr inbounds i16, ptr %a, i64 %iv2
  %load2 = load i16, ptr %gep2, align 4
  %conv2 = sext i16 %load2 to i32
  %mul01 = mul nsw i32 %conv, %conv1
  %mul012 = mul nsw i32 %mul01, %conv2
  %arrayidx5 = getelementptr inbounds i32, ptr %b, i64 %i
  store i32 %mul012, ptr %arrayidx5
  %exitcond = icmp eq i64 %iv, %n
  br i1 %exitcond, label %end, label %loop

end:                                              ; preds = %loop
  ret void
}

attributes #0 = { "unsafe-fp-math"="true" "target-features"="+sve" vscale_range(1, 16) }
attributes #1 = { "target-features"="+sve" vscale_range(1, 16) }