1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
2 ; RUN: opt -passes='loop-vectorize' -force-vector-width=2 -S < %s | FileCheck %s
4 ; Forcing VF=2 to trigger vector code gen
6 ; This is a test case that lets the vectorizer's code gen generate
7 ; more than one BasicBlock in the loop body (emulated masked scatter)
8 ; for targets that do not support masked scatter. Broadcast
9 ; code generation was previously dependent on the loop body being
10 ; a single basic block, and this test case exposed incorrect code gen
11 ; resulting in an assert during IR verification.
13 @a = external global [2 x i16], align 1
; f1: countdown loop from %start. When %c is false it stores 10 to %b,
; and every iteration stores 0 into @a indexed by the IV truncated to i16.
; The conditional store forces emulated masked-store code gen
; (pred.store.if blocks), i.e. a multi-basic-block vector loop body —
; the situation that previously broke broadcast code generation.
15 define void @f1(ptr noalias %b, i1 %c, i32 %start) {
16 ; CHECK-LABEL: define void @f1
17 ; CHECK-SAME: (ptr noalias [[B:%.*]], i1 [[C:%.*]], i32 [[START:%.*]]) {
; Trip count = (start + 1) - smin(start, 1); bail out to the scalar loop
; when fewer than VF=2 iterations remain.
19 ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[START]], 1
20 ; CHECK-NEXT: [[SMIN1:%.*]] = call i32 @llvm.smin.i32(i32 [[START]], i32 1)
21 ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[SMIN1]]
22 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP1]], 2
23 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; Runtime SCEV check: verify the narrowed i16 induction derived from
; %start cannot wrap over the trip count, otherwise take the scalar loop.
24 ; CHECK: vector.scevcheck:
25 ; CHECK-NEXT: [[SMIN:%.*]] = call i32 @llvm.smin.i32(i32 [[START]], i32 1)
26 ; CHECK-NEXT: [[TMP2:%.*]] = sub i32 [[START]], [[SMIN]]
27 ; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[START]] to i16
28 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP2]] to i16
29 ; CHECK-NEXT: [[MUL:%.*]] = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 1, i16 [[TMP4]])
30 ; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i16, i1 } [[MUL]], 0
31 ; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i16, i1 } [[MUL]], 1
32 ; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[TMP3]], [[MUL_RESULT]]
33 ; CHECK-NEXT: [[TMP6:%.*]] = icmp sgt i16 [[TMP5]], [[TMP3]]
34 ; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP6]], [[MUL_OVERFLOW]]
35 ; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i32 [[TMP2]], 65535
36 ; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
37 ; CHECK-NEXT: br i1 [[TMP9]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; vector.ph: compute the vector trip count, and broadcast %c once in the
; preheader; the store mask TMP10 is the per-lane splat of !%c.
39 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP1]], 2
40 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP1]], [[N_MOD_VF]]
41 ; CHECK-NEXT: [[IND_END:%.*]] = sub i32 [[START]], [[N_VEC]]
42 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[C]], i64 0
43 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer
44 ; CHECK-NEXT: [[TMP10:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
45 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; vector.body: per-lane predicated stores of 10 to %b (the emulated
; masked store, one if/continue block pair per lane), then a single
; contiguous <2 x i16> zero-store into @a for both lanes.
47 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ]
48 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[START]], [[INDEX]]
49 ; CHECK-NEXT: [[TMP11:%.*]] = trunc i32 [[OFFSET_IDX]] to i16
50 ; CHECK-NEXT: [[TMP12:%.*]] = add i16 [[TMP11]], 0
51 ; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i1> [[TMP10]], i32 0
52 ; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
53 ; CHECK: pred.store.if:
54 ; CHECK-NEXT: store i32 10, ptr [[B]], align 1
55 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
56 ; CHECK: pred.store.continue:
57 ; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP10]], i32 1
58 ; CHECK-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3]]
59 ; CHECK: pred.store.if2:
60 ; CHECK-NEXT: store i32 10, ptr [[B]], align 1
61 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE3]]
62 ; CHECK: pred.store.continue3:
; The IV counts down, so the two lanes cover [TMP12-1, TMP12]; hence the
; base pointer is offset by -1 before the contiguous vector store.
63 ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i16], ptr @a, i16 0, i16 [[TMP12]]
64 ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i16, ptr [[TMP15]], i32 0
65 ; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i16, ptr [[TMP16]], i32 -1
66 ; CHECK-NEXT: store <2 x i16> zeroinitializer, ptr [[TMP17]], align 1
67 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
68 ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
69 ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
70 ; CHECK: middle.block:
71 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP1]], [[N_VEC]]
72 ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; scalar.ph: resume the scalar loop at IND_END after the vector loop, or
; at %start when a runtime check sent us straight here.
74 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[START]], [[VECTOR_SCEVCHECK]] ], [ [[START]], [[ENTRY:%.*]] ]
75 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
77 ; CHECK-NEXT: [[TMP19:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[DEC:%.*]], [[LAND_END:%.*]] ]
78 ; CHECK-NEXT: br i1 [[C]], label [[LAND_END]], label [[LAND_RHS:%.*]]
80 ; CHECK-NEXT: store i32 10, ptr [[B]], align 1
81 ; CHECK-NEXT: br label [[LAND_END]]
83 ; CHECK-NEXT: [[T:%.*]] = trunc i32 [[TMP19]] to i16
84 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i16], ptr @a, i16 0, i16 [[T]]
85 ; CHECK-NEXT: store i16 0, ptr [[ARRAYIDX]], align 1
86 ; CHECK-NEXT: [[DEC]] = add nsw i32 [[TMP19]], -1
87 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP19]], 1
88 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
90 ; CHECK-NEXT: ret void
; Original scalar IR: the store of 10 to %b executes only when %c is
; false (land.rhs); land.end then stores 0 into @a[trunc(iv)] and the IV
; counts down until it reaches 1.
; NOTE(review): the entry and exit blocks of this function appear to be
; missing from this excerpt — confirm against the full file.
95 for.body: ; preds = %land.end, %entry
96 %0 = phi i32 [ %start, %entry ], [ %dec, %land.end ]
97 br i1 %c, label %land.end, label %land.rhs
99 land.rhs: ; preds = %for.body
100 store i32 10, ptr %b, align 1
103 land.end: ; preds = %land.rhs, %for.body
104 %t = trunc i32 %0 to i16
105 %arrayidx = getelementptr inbounds [2 x i16], ptr @a, i16 0, i16 %t
106 store i16 0, ptr %arrayidx, align 1
107 %dec = add nsw i32 %0, -1
108 %cmp = icmp sgt i32 %0, 1
109 br i1 %cmp, label %for.body, label %exit
; f2: same countdown loop shape as @f1, but the conditional operation is
; a LOAD from %b whose result is unused in the visible IR. The vector
; body here contains no predicated blocks and stays a single basic block
; (contrast with the pred.store.if blocks generated for @f1).
115 define void @f2(ptr noalias %b, i1 %c, i32 %start) {
116 ; CHECK-LABEL: define void @f2
117 ; CHECK-SAME: (ptr noalias [[B:%.*]], i1 [[C:%.*]], i32 [[START:%.*]]) {
; Trip count and minimum-iteration check, identical to @f1.
119 ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[START]], 1
120 ; CHECK-NEXT: [[SMIN1:%.*]] = call i32 @llvm.smin.i32(i32 [[START]], i32 1)
121 ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[SMIN1]]
122 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP1]], 2
123 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; Runtime SCEV check for the truncated i16 induction, identical to @f1.
124 ; CHECK: vector.scevcheck:
125 ; CHECK-NEXT: [[SMIN:%.*]] = call i32 @llvm.smin.i32(i32 [[START]], i32 1)
126 ; CHECK-NEXT: [[TMP2:%.*]] = sub i32 [[START]], [[SMIN]]
127 ; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[START]] to i16
128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP2]] to i16
129 ; CHECK-NEXT: [[MUL:%.*]] = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 1, i16 [[TMP4]])
130 ; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i16, i1 } [[MUL]], 0
131 ; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i16, i1 } [[MUL]], 1
132 ; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[TMP3]], [[MUL_RESULT]]
133 ; CHECK-NEXT: [[TMP6:%.*]] = icmp sgt i16 [[TMP5]], [[TMP3]]
134 ; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP6]], [[MUL_OVERFLOW]]
135 ; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i32 [[TMP2]], 65535
136 ; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
137 ; CHECK-NEXT: br i1 [[TMP9]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; vector.ph: no broadcast of %c is needed here — the dead conditional
; load left nothing to predicate.
139 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP1]], 2
140 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP1]], [[N_MOD_VF]]
141 ; CHECK-NEXT: [[IND_END:%.*]] = sub i32 [[START]], [[N_VEC]]
142 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; vector.body: one straight-line block — only the contiguous <2 x i16>
; zero-store into @a remains; the down-counting IV again offsets the
; store base by -1.
143 ; CHECK: vector.body:
144 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
145 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[START]], [[INDEX]]
146 ; CHECK-NEXT: [[TMP10:%.*]] = trunc i32 [[OFFSET_IDX]] to i16
147 ; CHECK-NEXT: [[TMP11:%.*]] = add i16 [[TMP10]], 0
148 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i16], ptr @a, i16 0, i16 [[TMP11]]
149 ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i16, ptr [[TMP12]], i32 0
150 ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i16, ptr [[TMP13]], i32 -1
151 ; CHECK-NEXT: store <2 x i16> zeroinitializer, ptr [[TMP14]], align 1
152 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
153 ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
154 ; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
155 ; CHECK: middle.block:
156 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP1]], [[N_VEC]]
157 ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; scalar.ph / scalar remainder loop, same resume structure as @f1.
159 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[START]], [[VECTOR_SCEVCHECK]] ], [ [[START]], [[ENTRY:%.*]] ]
160 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
162 ; CHECK-NEXT: [[TMP16:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[DEC:%.*]], [[LAND_END:%.*]] ]
163 ; CHECK-NEXT: br i1 [[C]], label [[LAND_END]], label [[LAND_RHS:%.*]]
165 ; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[B]], align 1
166 ; CHECK-NEXT: br label [[LAND_END]]
168 ; CHECK-NEXT: [[TMP18:%.*]] = trunc i32 [[TMP16]] to i16
169 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i16], ptr @a, i16 0, i16 [[TMP18]]
170 ; CHECK-NEXT: store i16 0, ptr [[ARRAYIDX]], align 1
171 ; CHECK-NEXT: [[DEC]] = add nsw i32 [[TMP16]], -1
172 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP16]], 1
173 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
175 ; CHECK-NEXT: ret void
; Original scalar IR: load i32 from %b only when %c is false (land.rhs);
; %1 is unused in the visible lines. land.end stores 0 into
; @a[trunc(iv)] and the IV counts down until it reaches 1.
; NOTE(review): the entry block and the trailing exit block / closing
; brace fall outside this excerpt — confirm against the full file.
180 for.body: ; preds = %land.end, %entry
181 %0 = phi i32 [ %start, %entry ], [ %dec, %land.end ]
182 br i1 %c, label %land.end, label %land.rhs
184 land.rhs: ; preds = %for.body
185 %1 = load i32, ptr %b, align 1
188 land.end: ; preds = %land.rhs, %for.body
189 %2 = trunc i32 %0 to i16
190 %arrayidx = getelementptr inbounds [2 x i16], ptr @a, i16 0, i16 %2
191 store i16 0, ptr %arrayidx, align 1
192 %dec = add nsw i32 %0, -1
193 %cmp = icmp sgt i32 %0, 1
194 br i1 %cmp, label %for.body, label %exit