[ARM] More MVE compare vector splat combines for ANDs
[llvm-complete.git] / test / Transforms / LoopVectorize / if-pred-stores.ll
blob353087f66e537486690e1c98843ffa553e2f3dff
1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt -S -vectorize-num-stores-pred=1 -force-vector-width=1 -force-vector-interleave=2 -loop-vectorize -verify-loop-info -simplifycfg < %s | FileCheck %s --check-prefix=UNROLL
3 ; RUN: opt -S -vectorize-num-stores-pred=1 -force-vector-width=1 -force-vector-interleave=2 -loop-vectorize -verify-loop-info < %s | FileCheck %s --check-prefix=UNROLL-NOSIMPLIFY
4 ; RUN: opt -S -vectorize-num-stores-pred=1 -force-vector-width=2 -force-vector-interleave=1 -loop-vectorize -verify-loop-info -simplifycfg < %s | FileCheck %s --check-prefix=VEC
6 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
8 ; Test predication of stores.
; @test: predicated-store vectorization. Each element with f[i] > 100 is
; rewritten to f[i] + 20 through a store that must execute conditionally.
; The autogenerated CHECK blocks (update_test_checks.py) pin the expected
; pred.store.if / pred.store.continue structure for three RUN configs:
; VF=1/IC=2 with -simplifycfg (UNROLL), VF=1/IC=2 without it
; (UNROLL-NOSIMPLIFY, which keeps the scalar.ph/epilogue blocks), and
; VF=2/IC=1 (VEC, which extracts per-lane mask bits from a <2 x i1> icmp).
; NOTE(review): these CHECK lines are exact-match output expectations, not
; commentary — do not edit them by hand; regenerate with the script instead.
9 define i32 @test(i32* nocapture %f) #0 {
10 ; UNROLL-LABEL: @test(
11 ; UNROLL-NEXT:  entry:
12 ; UNROLL-NEXT:    br label [[VECTOR_BODY:%.*]]
13 ; UNROLL:       vector.body:
14 ; UNROLL-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ]
15 ; UNROLL-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
16 ; UNROLL-NEXT:    [[INDUCTION1:%.*]] = add i64 [[INDEX]], 1
17 ; UNROLL-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[F:%.*]], i64 [[INDUCTION]]
18 ; UNROLL-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDUCTION1]]
19 ; UNROLL-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
20 ; UNROLL-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
21 ; UNROLL-NEXT:    [[TMP4:%.*]] = icmp sgt i32 [[TMP2]], 100
22 ; UNROLL-NEXT:    [[TMP5:%.*]] = icmp sgt i32 [[TMP3]], 100
23 ; UNROLL-NEXT:    br i1 [[TMP4]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
24 ; UNROLL:       pred.store.if:
25 ; UNROLL-NEXT:    [[TMP6:%.*]] = add nsw i32 [[TMP2]], 20
26 ; UNROLL-NEXT:    store i32 [[TMP6]], i32* [[TMP0]], align 4
27 ; UNROLL-NEXT:    br label [[PRED_STORE_CONTINUE]]
28 ; UNROLL:       pred.store.continue:
29 ; UNROLL-NEXT:    br i1 [[TMP5]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3]]
30 ; UNROLL:       pred.store.if2:
31 ; UNROLL-NEXT:    [[TMP7:%.*]] = add nsw i32 [[TMP3]], 20
32 ; UNROLL-NEXT:    store i32 [[TMP7]], i32* [[TMP1]], align 4
33 ; UNROLL-NEXT:    br label [[PRED_STORE_CONTINUE3]]
34 ; UNROLL:       pred.store.continue3:
35 ; UNROLL-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 2
36 ; UNROLL-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
37 ; UNROLL-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
38 ; UNROLL:       middle.block:
39 ; UNROLL-NEXT:    [[CMP_N:%.*]] = icmp eq i64 128, 128
40 ; UNROLL-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[FOR_BODY:%.*]]
41 ; UNROLL:       for.body:
42 ; UNROLL-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 128, [[MIDDLE_BLOCK]] ]
43 ; UNROLL-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDVARS_IV]]
44 ; UNROLL-NEXT:    [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
45 ; UNROLL-NEXT:    [[CMP1:%.*]] = icmp sgt i32 [[TMP9]], 100
46 ; UNROLL-NEXT:    br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
47 ; UNROLL:       if.then:
48 ; UNROLL-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP9]], 20
49 ; UNROLL-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX]], align 4
50 ; UNROLL-NEXT:    br label [[FOR_INC]]
51 ; UNROLL:       for.inc:
52 ; UNROLL-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
53 ; UNROLL-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 128
54 ; UNROLL-NEXT:    br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop !2
55 ; UNROLL:       for.end:
56 ; UNROLL-NEXT:    ret i32 0
58 ; UNROLL-NOSIMPLIFY-LABEL: @test(
59 ; UNROLL-NOSIMPLIFY-NEXT:  entry:
60 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
61 ; UNROLL-NOSIMPLIFY:       vector.ph:
62 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[VECTOR_BODY:%.*]]
63 ; UNROLL-NOSIMPLIFY:       vector.body:
64 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ]
65 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
66 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION1:%.*]] = add i64 [[INDEX]], 1
67 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[F:%.*]], i64 [[INDUCTION]]
68 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDUCTION1]]
69 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
70 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
71 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = icmp sgt i32 [[TMP2]], 100
72 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = icmp sgt i32 [[TMP3]], 100
73 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP4]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
74 ; UNROLL-NOSIMPLIFY:       pred.store.if:
75 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6:%.*]] = add nsw i32 [[TMP2]], 20
76 ; UNROLL-NOSIMPLIFY-NEXT:    store i32 [[TMP6]], i32* [[TMP0]], align 4
77 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE]]
78 ; UNROLL-NOSIMPLIFY:       pred.store.continue:
79 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP5]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3]]
80 ; UNROLL-NOSIMPLIFY:       pred.store.if2:
81 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7:%.*]] = add nsw i32 [[TMP3]], 20
82 ; UNROLL-NOSIMPLIFY-NEXT:    store i32 [[TMP7]], i32* [[TMP1]], align 4
83 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE3]]
84 ; UNROLL-NOSIMPLIFY:       pred.store.continue3:
85 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 2
86 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
87 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
88 ; UNROLL-NOSIMPLIFY:       middle.block:
89 ; UNROLL-NOSIMPLIFY-NEXT:    [[CMP_N:%.*]] = icmp eq i64 128, 128
90 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
91 ; UNROLL-NOSIMPLIFY:       scalar.ph:
92 ; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 128, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
93 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_BODY:%.*]]
94 ; UNROLL-NOSIMPLIFY:       for.body:
95 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
96 ; UNROLL-NOSIMPLIFY-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDVARS_IV]]
97 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
98 ; UNROLL-NOSIMPLIFY-NEXT:    [[CMP1:%.*]] = icmp sgt i32 [[TMP9]], 100
99 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
100 ; UNROLL-NOSIMPLIFY:       if.then:
101 ; UNROLL-NOSIMPLIFY-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP9]], 20
102 ; UNROLL-NOSIMPLIFY-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX]], align 4
103 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_INC]]
104 ; UNROLL-NOSIMPLIFY:       for.inc:
105 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
106 ; UNROLL-NOSIMPLIFY-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 128
107 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop !2
108 ; UNROLL-NOSIMPLIFY:       for.end:
109 ; UNROLL-NOSIMPLIFY-NEXT:    ret i32 0
111 ; VEC-LABEL: @test(
112 ; VEC-NEXT:  entry:
113 ; VEC-NEXT:    br label [[VECTOR_BODY:%.*]]
114 ; VEC:       vector.body:
115 ; VEC-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
116 ; VEC-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> undef, i64 [[INDEX]], i32 0
117 ; VEC-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> undef, <2 x i32> zeroinitializer
118 ; VEC-NEXT:    [[INDUCTION:%.*]] = add <2 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1>
119 ; VEC-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
120 ; VEC-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[F:%.*]], i64 [[TMP0]]
121 ; VEC-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
122 ; VEC-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <2 x i32>*
123 ; VEC-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i32>, <2 x i32>* [[TMP3]], align 4
124 ; VEC-NEXT:    [[TMP4:%.*]] = icmp sgt <2 x i32> [[WIDE_LOAD]], <i32 100, i32 100>
125 ; VEC-NEXT:    [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0
126 ; VEC-NEXT:    br i1 [[TMP5]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
127 ; VEC:       pred.store.if:
128 ; VEC-NEXT:    [[TMP6:%.*]] = extractelement <2 x i32> [[WIDE_LOAD]], i32 0
129 ; VEC-NEXT:    [[TMP7:%.*]] = add nsw i32 [[TMP6]], 20
130 ; VEC-NEXT:    store i32 [[TMP7]], i32* [[TMP1]], align 4
131 ; VEC-NEXT:    br label [[PRED_STORE_CONTINUE]]
132 ; VEC:       pred.store.continue:
133 ; VEC-NEXT:    [[TMP8:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1
134 ; VEC-NEXT:    br i1 [[TMP8]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
135 ; VEC:       pred.store.if1:
136 ; VEC-NEXT:    [[TMP9:%.*]] = extractelement <2 x i32> [[WIDE_LOAD]], i32 1
137 ; VEC-NEXT:    [[TMP10:%.*]] = add nsw i32 [[TMP9]], 20
138 ; VEC-NEXT:    [[TMP11:%.*]] = add i64 [[INDEX]], 1
139 ; VEC-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[TMP11]]
140 ; VEC-NEXT:    store i32 [[TMP10]], i32* [[TMP12]], align 4
141 ; VEC-NEXT:    br label [[PRED_STORE_CONTINUE2]]
142 ; VEC:       pred.store.continue2:
143 ; VEC-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 2
144 ; VEC-NEXT:    [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
145 ; VEC-NEXT:    br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
146 ; VEC:       middle.block:
147 ; VEC-NEXT:    [[CMP_N:%.*]] = icmp eq i64 128, 128
148 ; VEC-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[FOR_BODY:%.*]]
149 ; VEC:       for.body:
150 ; VEC-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 128, [[MIDDLE_BLOCK]] ]
151 ; VEC-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDVARS_IV]]
152 ; VEC-NEXT:    [[TMP14:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
153 ; VEC-NEXT:    [[CMP1:%.*]] = icmp sgt i32 [[TMP14]], 100
154 ; VEC-NEXT:    br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
155 ; VEC:       if.then:
156 ; VEC-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP14]], 20
157 ; VEC-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX]], align 4
158 ; VEC-NEXT:    br label [[FOR_INC]]
159 ; VEC:       for.inc:
160 ; VEC-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
161 ; VEC-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 128
162 ; VEC-NEXT:    br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop !2
163 ; VEC:       for.end:
164 ; VEC-NEXT:    ret i32 0
; Scalar input IR: counted loop over i = 0..127; load f[i] and, when
; f[i] > 100, store f[i] + 20 back to the same slot before the
; increment-and-exit test.
166 entry:
167   br label %for.body
171 for.body:
172   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
173   %arrayidx = getelementptr inbounds i32, i32* %f, i64 %indvars.iv
174   %0 = load i32, i32* %arrayidx, align 4
175   %cmp1 = icmp sgt i32 %0, 100
176   br i1 %cmp1, label %if.then, label %for.inc
178 if.then:
179   %add = add nsw i32 %0, 20
180   store i32 %add, i32* %arrayidx, align 4
181   br label %for.inc
183 for.inc:
184   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
185   %exitcond = icmp eq i64 %indvars.iv.next, 128
186   br i1 %exitcond, label %for.end, label %for.body
188 for.end:
189   ret i32 0
192 ; Track basic blocks when unrolling conditional blocks. This code used to assert
193 ; because we did not update the phi nodes with the proper predecessor in the
194 ; vectorized loop body.
195 ; PR18724
; @bug18724 (PR18724 regression test): unrolling conditional blocks used to
; assert because phi nodes in the vectorized loop body were not updated with
; the proper predecessor. The UNROLL and VEC prefixes check the collapsed
; (-simplifycfg) output, while UNROLL-NOSIMPLIFY pins the full vectorized
; skeleton including the predicated reduction (VEC_PHI / PREDPHI selects).
; NOTE(review): autogenerated CHECK lines — regenerate with
; update_test_checks.py rather than editing by hand.
197 define void @bug18724(i1 %cond) {
198 ; UNROLL-LABEL: @bug18724(
199 ; UNROLL-NEXT:  entry:
200 ; UNROLL-NEXT:    [[TMP0:%.*]] = xor i1 [[COND:%.*]], true
201 ; UNROLL-NEXT:    call void @llvm.assume(i1 [[TMP0]])
202 ; UNROLL-NEXT:    br label [[FOR_BODY14:%.*]]
203 ; UNROLL:       for.body14:
204 ; UNROLL-NEXT:    [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT4:%.*]], [[FOR_INC23:%.*]] ], [ undef, [[ENTRY:%.*]] ]
205 ; UNROLL-NEXT:    [[INEWCHUNKS_120:%.*]] = phi i32 [ [[INEWCHUNKS_2:%.*]], [[FOR_INC23]] ], [ undef, [[ENTRY]] ]
206 ; UNROLL-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 [[INDVARS_IV3]]
207 ; UNROLL-NEXT:    [[TMP:%.*]] = load i32, i32* [[ARRAYIDX16]], align 4
208 ; UNROLL-NEXT:    br i1 undef, label [[IF_THEN18:%.*]], label [[FOR_INC23]]
209 ; UNROLL:       if.then18:
210 ; UNROLL-NEXT:    store i32 2, i32* [[ARRAYIDX16]], align 4
211 ; UNROLL-NEXT:    [[INC21:%.*]] = add nsw i32 [[INEWCHUNKS_120]], 1
212 ; UNROLL-NEXT:    br label [[FOR_INC23]]
213 ; UNROLL:       for.inc23:
214 ; UNROLL-NEXT:    [[INEWCHUNKS_2]] = phi i32 [ [[INC21]], [[IF_THEN18]] ], [ [[INEWCHUNKS_120]], [[FOR_BODY14]] ]
215 ; UNROLL-NEXT:    [[INDVARS_IV_NEXT4]] = add nsw i64 [[INDVARS_IV3]], 1
216 ; UNROLL-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV3]] to i32
217 ; UNROLL-NEXT:    [[CMP13:%.*]] = icmp slt i32 [[TMP1]], 0
218 ; UNROLL-NEXT:    call void @llvm.assume(i1 [[CMP13]])
219 ; UNROLL-NEXT:    br label [[FOR_BODY14]]
221 ; UNROLL-NOSIMPLIFY-LABEL: @bug18724(
222 ; UNROLL-NOSIMPLIFY-NEXT:  entry:
223 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_BODY9:%.*]]
224 ; UNROLL-NOSIMPLIFY:       for.body9:
225 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[COND:%.*]], label [[FOR_INC26:%.*]], label [[FOR_BODY14_PREHEADER:%.*]]
226 ; UNROLL-NOSIMPLIFY:       for.body14.preheader:
227 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 true, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
228 ; UNROLL-NOSIMPLIFY:       vector.ph:
229 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[VECTOR_BODY:%.*]]
230 ; UNROLL-NOSIMPLIFY:       vector.body:
231 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE4:%.*]] ]
232 ; UNROLL-NOSIMPLIFY-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ undef, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE4]] ]
233 ; UNROLL-NOSIMPLIFY-NEXT:    [[VEC_PHI2:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[PREDPHI5:%.*]], [[PRED_STORE_CONTINUE4]] ]
234 ; UNROLL-NOSIMPLIFY-NEXT:    [[OFFSET_IDX:%.*]] = add i64 undef, [[INDEX]]
235 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION:%.*]] = add i64 [[OFFSET_IDX]], 0
236 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION1:%.*]] = add i64 [[OFFSET_IDX]], 1
237 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 [[INDUCTION]]
238 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 [[INDUCTION1]]
239 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
240 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
241 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 undef, label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
242 ; UNROLL-NOSIMPLIFY:       pred.store.if:
243 ; UNROLL-NOSIMPLIFY-NEXT:    store i32 2, i32* [[TMP0]], align 4
244 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE]]
245 ; UNROLL-NOSIMPLIFY:       pred.store.continue:
246 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 undef, label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4]]
247 ; UNROLL-NOSIMPLIFY:       pred.store.if3:
248 ; UNROLL-NOSIMPLIFY-NEXT:    store i32 2, i32* [[TMP1]], align 4
249 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE4]]
250 ; UNROLL-NOSIMPLIFY:       pred.store.continue4:
251 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = add nsw i32 [[VEC_PHI]], 1
252 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = add nsw i32 [[VEC_PHI2]], 1
253 ; UNROLL-NOSIMPLIFY-NEXT:    [[PREDPHI]] = select i1 undef, i32 [[VEC_PHI]], i32 [[TMP4]]
254 ; UNROLL-NOSIMPLIFY-NEXT:    [[PREDPHI5]] = select i1 undef, i32 [[VEC_PHI2]], i32 [[TMP5]]
255 ; UNROLL-NOSIMPLIFY-NEXT:    [[OFFSET_IDX6:%.*]] = add i64 undef, [[INDEX]]
256 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6:%.*]] = trunc i64 [[OFFSET_IDX6]] to i32
257 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION7:%.*]] = add i32 [[TMP6]], 0
258 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION8:%.*]] = add i32 [[TMP6]], 1
259 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 2
260 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 0
261 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !3
262 ; UNROLL-NOSIMPLIFY:       middle.block:
263 ; UNROLL-NOSIMPLIFY-NEXT:    [[BIN_RDX:%.*]] = add i32 [[PREDPHI5]], [[PREDPHI]]
264 ; UNROLL-NOSIMPLIFY-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1, 0
265 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[CMP_N]], label [[FOR_INC26_LOOPEXIT:%.*]], label [[SCALAR_PH]]
266 ; UNROLL-NOSIMPLIFY:       scalar.ph:
267 ; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ undef, [[MIDDLE_BLOCK]] ], [ undef, [[FOR_BODY14_PREHEADER]] ]
268 ; UNROLL-NOSIMPLIFY-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ undef, [[FOR_BODY14_PREHEADER]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
269 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_BODY14:%.*]]
270 ; UNROLL-NOSIMPLIFY:       for.body14:
271 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT4:%.*]], [[FOR_INC23:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
272 ; UNROLL-NOSIMPLIFY-NEXT:    [[INEWCHUNKS_120:%.*]] = phi i32 [ [[INEWCHUNKS_2:%.*]], [[FOR_INC23]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
273 ; UNROLL-NOSIMPLIFY-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 [[INDVARS_IV3]]
274 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP:%.*]] = load i32, i32* [[ARRAYIDX16]], align 4
275 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 undef, label [[IF_THEN18:%.*]], label [[FOR_INC23]]
276 ; UNROLL-NOSIMPLIFY:       if.then18:
277 ; UNROLL-NOSIMPLIFY-NEXT:    store i32 2, i32* [[ARRAYIDX16]], align 4
278 ; UNROLL-NOSIMPLIFY-NEXT:    [[INC21:%.*]] = add nsw i32 [[INEWCHUNKS_120]], 1
279 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_INC23]]
280 ; UNROLL-NOSIMPLIFY:       for.inc23:
281 ; UNROLL-NOSIMPLIFY-NEXT:    [[INEWCHUNKS_2]] = phi i32 [ [[INC21]], [[IF_THEN18]] ], [ [[INEWCHUNKS_120]], [[FOR_BODY14]] ]
282 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDVARS_IV_NEXT4]] = add nsw i64 [[INDVARS_IV3]], 1
283 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV3]] to i32
284 ; UNROLL-NOSIMPLIFY-NEXT:    [[CMP13:%.*]] = icmp slt i32 [[TMP1]], 0
285 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[CMP13]], label [[FOR_BODY14]], label [[FOR_INC26_LOOPEXIT]], !llvm.loop !4
286 ; UNROLL-NOSIMPLIFY:       for.inc26.loopexit:
287 ; UNROLL-NOSIMPLIFY-NEXT:    [[INEWCHUNKS_2_LCSSA:%.*]] = phi i32 [ [[INEWCHUNKS_2]], [[FOR_INC23]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
288 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_INC26]]
289 ; UNROLL-NOSIMPLIFY:       for.inc26:
290 ; UNROLL-NOSIMPLIFY-NEXT:    [[INEWCHUNKS_1_LCSSA:%.*]] = phi i32 [ undef, [[FOR_BODY9]] ], [ [[INEWCHUNKS_2_LCSSA]], [[FOR_INC26_LOOPEXIT]] ]
291 ; UNROLL-NOSIMPLIFY-NEXT:    unreachable
293 ; VEC-LABEL: @bug18724(
294 ; VEC-NEXT:  entry:
295 ; VEC-NEXT:    [[TMP0:%.*]] = xor i1 [[COND:%.*]], true
296 ; VEC-NEXT:    call void @llvm.assume(i1 [[TMP0]])
297 ; VEC-NEXT:    br label [[FOR_BODY14:%.*]]
298 ; VEC:       for.body14:
299 ; VEC-NEXT:    [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT4:%.*]], [[FOR_INC23:%.*]] ], [ undef, [[ENTRY:%.*]] ]
300 ; VEC-NEXT:    [[INEWCHUNKS_120:%.*]] = phi i32 [ [[INEWCHUNKS_2:%.*]], [[FOR_INC23]] ], [ undef, [[ENTRY]] ]
301 ; VEC-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 [[INDVARS_IV3]]
302 ; VEC-NEXT:    [[TMP:%.*]] = load i32, i32* [[ARRAYIDX16]], align 4
303 ; VEC-NEXT:    br i1 undef, label [[IF_THEN18:%.*]], label [[FOR_INC23]]
304 ; VEC:       if.then18:
305 ; VEC-NEXT:    store i32 2, i32* [[ARRAYIDX16]], align 4
306 ; VEC-NEXT:    [[INC21:%.*]] = add nsw i32 [[INEWCHUNKS_120]], 1
307 ; VEC-NEXT:    br label [[FOR_INC23]]
308 ; VEC:       for.inc23:
309 ; VEC-NEXT:    [[INEWCHUNKS_2]] = phi i32 [ [[INC21]], [[IF_THEN18]] ], [ [[INEWCHUNKS_120]], [[FOR_BODY14]] ]
310 ; VEC-NEXT:    [[INDVARS_IV_NEXT4]] = add nsw i64 [[INDVARS_IV3]], 1
311 ; VEC-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV3]] to i32
312 ; VEC-NEXT:    [[CMP13:%.*]] = icmp slt i32 [[TMP1]], 0
313 ; VEC-NEXT:    call void @llvm.assume(i1 [[CMP13]])
314 ; VEC-NEXT:    br label [[FOR_BODY14]]
; Scalar input IR (reduced from PR18724): outer dispatch on %cond; inner
; loop with a conditional store of 2 and an i32 counter (%iNewChunks.*)
; carried through phi nodes — the structure that exposed the bad
; predecessor update. The undef operands are intentional reducer residue.
316 entry:
317   br label %for.body9
319 for.body9:
320   br i1 %cond, label %for.inc26, label %for.body14
322 for.body14:
323   %indvars.iv3 = phi i64 [ %indvars.iv.next4, %for.inc23 ], [ undef, %for.body9 ]
324   %iNewChunks.120 = phi i32 [ %iNewChunks.2, %for.inc23 ], [ undef, %for.body9 ]
325   %arrayidx16 = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 %indvars.iv3
326   %tmp = load i32, i32* %arrayidx16, align 4
327   br i1 undef, label %if.then18, label %for.inc23
329 if.then18:
330   store i32 2, i32* %arrayidx16, align 4
331   %inc21 = add nsw i32 %iNewChunks.120, 1
332   br label %for.inc23
334 for.inc23:
335   %iNewChunks.2 = phi i32 [ %inc21, %if.then18 ], [ %iNewChunks.120, %for.body14 ]
336   %indvars.iv.next4 = add nsw i64 %indvars.iv3, 1
337   %tmp1 = trunc i64 %indvars.iv3 to i32
338   %cmp13 = icmp slt i32 %tmp1, 0
339   br i1 %cmp13, label %for.body14, label %for.inc26
341 for.inc26:
342   %iNewChunks.1.lcssa = phi i32 [ undef, %for.body9 ], [ %iNewChunks.2, %for.inc23 ]
343   unreachable
346 ; In the test below, it's more profitable for the expression feeding the
347 ; conditional store to remain scalar. Since we can only type-shrink vector
348 ; types, we shouldn't try to represent the expression in a smaller type.
; @minimal_bit_widths: the expression feeding the conditional store
; (zext i8 -> i32, trunc i32 -> i8) is more profitable left scalar, and
; since only vector types can be type-shrunk, the vectorizer must not try
; to represent it in a smaller type. The CHECK blocks verify the predicated
; stores keep the scalar zext/trunc round trip intact in every RUN config;
; the uniform branch condition %c is the store predicate for all lanes.
; NOTE(review): autogenerated CHECK lines — regenerate, do not hand-edit.
350 define void @minimal_bit_widths(i1 %c) {
351 ; UNROLL-LABEL: @minimal_bit_widths(
352 ; UNROLL-NEXT:  entry:
353 ; UNROLL-NEXT:    br label [[VECTOR_BODY:%.*]]
354 ; UNROLL:       vector.body:
355 ; UNROLL-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
356 ; UNROLL-NEXT:    [[OFFSET_IDX:%.*]] = sub i64 undef, [[INDEX]]
357 ; UNROLL-NEXT:    [[INDUCTION3:%.*]] = add i64 [[OFFSET_IDX]], 0
358 ; UNROLL-NEXT:    [[INDUCTION4:%.*]] = add i64 [[OFFSET_IDX]], -1
359 ; UNROLL-NEXT:    br i1 [[C:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE6]]
360 ; UNROLL:       pred.store.if:
361 ; UNROLL-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
362 ; UNROLL-NEXT:    [[TMP0:%.*]] = getelementptr i8, i8* undef, i64 [[INDUCTION]]
363 ; UNROLL-NEXT:    [[TMP1:%.*]] = load i8, i8* [[TMP0]], align 1
364 ; UNROLL-NEXT:    [[TMP2:%.*]] = zext i8 [[TMP1]] to i32
365 ; UNROLL-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
366 ; UNROLL-NEXT:    store i8 [[TMP3]], i8* [[TMP0]], align 1
367 ; UNROLL-NEXT:    [[INDUCTION2:%.*]] = add i64 [[INDEX]], 1
368 ; UNROLL-NEXT:    [[TMP4:%.*]] = getelementptr i8, i8* undef, i64 [[INDUCTION2]]
369 ; UNROLL-NEXT:    [[TMP5:%.*]] = load i8, i8* [[TMP4]], align 1
370 ; UNROLL-NEXT:    [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
371 ; UNROLL-NEXT:    [[TMP7:%.*]] = trunc i32 [[TMP6]] to i8
372 ; UNROLL-NEXT:    store i8 [[TMP7]], i8* [[TMP4]], align 1
373 ; UNROLL-NEXT:    br label [[PRED_STORE_CONTINUE6]]
374 ; UNROLL:       pred.store.continue6:
375 ; UNROLL-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 2
376 ; UNROLL-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], undef
377 ; UNROLL-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !3
378 ; UNROLL:       middle.block:
379 ; UNROLL-NEXT:    [[CMP_N:%.*]] = icmp eq i64 undef, undef
380 ; UNROLL-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[FOR_BODY:%.*]]
381 ; UNROLL:       for.body:
382 ; UNROLL-NEXT:    [[TMP0:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_INC:%.*]] ], [ undef, [[MIDDLE_BLOCK]] ]
383 ; UNROLL-NEXT:    [[TMP1:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ undef, [[MIDDLE_BLOCK]] ]
384 ; UNROLL-NEXT:    [[TMP2:%.*]] = getelementptr i8, i8* undef, i64 [[TMP0]]
385 ; UNROLL-NEXT:    [[TMP3:%.*]] = load i8, i8* [[TMP2]], align 1
386 ; UNROLL-NEXT:    br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]]
387 ; UNROLL:       if.then:
388 ; UNROLL-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
389 ; UNROLL-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
390 ; UNROLL-NEXT:    store i8 [[TMP5]], i8* [[TMP2]], align 1
391 ; UNROLL-NEXT:    br label [[FOR_INC]]
392 ; UNROLL:       for.inc:
393 ; UNROLL-NEXT:    [[TMP6]] = add nuw nsw i64 [[TMP0]], 1
394 ; UNROLL-NEXT:    [[TMP7]] = add i64 [[TMP1]], -1
395 ; UNROLL-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
396 ; UNROLL-NEXT:    br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop !4
397 ; UNROLL:       for.end:
398 ; UNROLL-NEXT:    ret void
400 ; UNROLL-NOSIMPLIFY-LABEL: @minimal_bit_widths(
401 ; UNROLL-NOSIMPLIFY-NEXT:  entry:
402 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
403 ; UNROLL-NOSIMPLIFY:       vector.ph:
404 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[VECTOR_BODY:%.*]]
405 ; UNROLL-NOSIMPLIFY:       vector.body:
406 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
407 ; UNROLL-NOSIMPLIFY-NEXT:    [[OFFSET_IDX:%.*]] = sub i64 undef, [[INDEX]]
408 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION3:%.*]] = add i64 [[OFFSET_IDX]], 0
409 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION4:%.*]] = add i64 [[OFFSET_IDX]], -1
410 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[C:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
411 ; UNROLL-NOSIMPLIFY:       pred.store.if:
412 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
413 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = getelementptr i8, i8* undef, i64 [[INDUCTION]]
414 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = load i8, i8* [[TMP0]], align 1
415 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = zext i8 [[TMP1]] to i32
416 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
417 ; UNROLL-NOSIMPLIFY-NEXT:    store i8 [[TMP3]], i8* [[TMP0]], align 1
418 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE]]
419 ; UNROLL-NOSIMPLIFY:       pred.store.continue:
420 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[C]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6]]
421 ; UNROLL-NOSIMPLIFY:       pred.store.if5:
422 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION2:%.*]] = add i64 [[INDEX]], 1
423 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = getelementptr i8, i8* undef, i64 [[INDUCTION2]]
424 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = load i8, i8* [[TMP4]], align 1
425 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
426 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7:%.*]] = trunc i32 [[TMP6]] to i8
427 ; UNROLL-NOSIMPLIFY-NEXT:    store i8 [[TMP7]], i8* [[TMP4]], align 1
428 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE6]]
429 ; UNROLL-NOSIMPLIFY:       pred.store.continue6:
430 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 2
431 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], undef
432 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !5
433 ; UNROLL-NOSIMPLIFY:       middle.block:
434 ; UNROLL-NOSIMPLIFY-NEXT:    [[CMP_N:%.*]] = icmp eq i64 undef, undef
435 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
436 ; UNROLL-NOSIMPLIFY:       scalar.ph:
437 ; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ undef, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
438 ; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL1:%.*]] = phi i64 [ undef, [[MIDDLE_BLOCK]] ], [ undef, [[ENTRY]] ]
439 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_BODY:%.*]]
440 ; UNROLL-NOSIMPLIFY:       for.body:
441 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
442 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
443 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = getelementptr i8, i8* undef, i64 [[TMP0]]
444 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = load i8, i8* [[TMP2]], align 1
445 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]]
446 ; UNROLL-NOSIMPLIFY:       if.then:
447 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
448 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
449 ; UNROLL-NOSIMPLIFY-NEXT:    store i8 [[TMP5]], i8* [[TMP2]], align 1
450 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_INC]]
451 ; UNROLL-NOSIMPLIFY:       for.inc:
452 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6]] = add nuw nsw i64 [[TMP0]], 1
453 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7]] = add i64 [[TMP1]], -1
454 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
455 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop !6
456 ; UNROLL-NOSIMPLIFY:       for.end:
457 ; UNROLL-NOSIMPLIFY-NEXT:    ret void
459 ; VEC-LABEL: @minimal_bit_widths(
460 ; VEC-NEXT:  entry:
461 ; VEC-NEXT:    [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <2 x i1> undef, i1 [[C:%.*]], i32 0
462 ; VEC-NEXT:    [[BROADCAST_SPLAT6:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT5]], <2 x i1> undef, <2 x i32> zeroinitializer
463 ; VEC-NEXT:    br label [[VECTOR_BODY:%.*]]
464 ; VEC:       vector.body:
465 ; VEC-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE8:%.*]] ]
466 ; VEC-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> undef, i64 [[INDEX]], i32 0
467 ; VEC-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> undef, <2 x i32> zeroinitializer
468 ; VEC-NEXT:    [[INDUCTION:%.*]] = add <2 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1>
469 ; VEC-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
470 ; VEC-NEXT:    [[OFFSET_IDX:%.*]] = sub i64 undef, [[INDEX]]
471 ; VEC-NEXT:    [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <2 x i64> undef, i64 [[OFFSET_IDX]], i32 0
472 ; VEC-NEXT:    [[BROADCAST_SPLAT3:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT2]], <2 x i64> undef, <2 x i32> zeroinitializer
473 ; VEC-NEXT:    [[INDUCTION4:%.*]] = add <2 x i64> [[BROADCAST_SPLAT3]], <i64 0, i64 -1>
474 ; VEC-NEXT:    [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 0
475 ; VEC-NEXT:    [[TMP2:%.*]] = getelementptr i8, i8* undef, i64 [[TMP0]]
476 ; VEC-NEXT:    [[TMP3:%.*]] = getelementptr i8, i8* [[TMP2]], i32 0
477 ; VEC-NEXT:    [[TMP4:%.*]] = bitcast i8* [[TMP3]] to <2 x i8>*
478 ; VEC-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i8>, <2 x i8>* [[TMP4]], align 1
479 ; VEC-NEXT:    [[TMP5:%.*]] = extractelement <2 x i1> [[BROADCAST_SPLAT6]], i32 0
480 ; VEC-NEXT:    br i1 [[TMP5]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
481 ; VEC:       pred.store.if:
482 ; VEC-NEXT:    [[TMP6:%.*]] = extractelement <2 x i8> [[WIDE_LOAD]], i32 0
483 ; VEC-NEXT:    [[TMP7:%.*]] = zext i8 [[TMP6]] to i32
484 ; VEC-NEXT:    [[TMP8:%.*]] = trunc i32 [[TMP7]] to i8
485 ; VEC-NEXT:    store i8 [[TMP8]], i8* [[TMP2]], align 1
486 ; VEC-NEXT:    br label [[PRED_STORE_CONTINUE]]
487 ; VEC:       pred.store.continue:
488 ; VEC-NEXT:    [[TMP9:%.*]] = extractelement <2 x i1> [[BROADCAST_SPLAT6]], i32 1
489 ; VEC-NEXT:    br i1 [[TMP9]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8]]
490 ; VEC:       pred.store.if7:
491 ; VEC-NEXT:    [[TMP10:%.*]] = extractelement <2 x i8> [[WIDE_LOAD]], i32 1
492 ; VEC-NEXT:    [[TMP11:%.*]] = zext i8 [[TMP10]] to i32
493 ; VEC-NEXT:    [[TMP12:%.*]] = trunc i32 [[TMP11]] to i8
494 ; VEC-NEXT:    [[TMP13:%.*]] = add i64 [[INDEX]], 1
495 ; VEC-NEXT:    [[TMP14:%.*]] = getelementptr i8, i8* undef, i64 [[TMP13]]
496 ; VEC-NEXT:    store i8 [[TMP12]], i8* [[TMP14]], align 1
497 ; VEC-NEXT:    br label [[PRED_STORE_CONTINUE8]]
498 ; VEC:       pred.store.continue8:
499 ; VEC-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 2
500 ; VEC-NEXT:    [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], undef
501 ; VEC-NEXT:    br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !4
502 ; VEC:       middle.block:
503 ; VEC-NEXT:    [[CMP_N:%.*]] = icmp eq i64 undef, undef
504 ; VEC-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[FOR_BODY:%.*]]
505 ; VEC:       for.body:
506 ; VEC-NEXT:    [[TMP0:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_INC:%.*]] ], [ undef, [[MIDDLE_BLOCK]] ]
507 ; VEC-NEXT:    [[TMP1:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ undef, [[MIDDLE_BLOCK]] ]
508 ; VEC-NEXT:    [[TMP2:%.*]] = getelementptr i8, i8* undef, i64 [[TMP0]]
509 ; VEC-NEXT:    [[TMP3:%.*]] = load i8, i8* [[TMP2]], align 1
510 ; VEC-NEXT:    br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]]
511 ; VEC:       if.then:
512 ; VEC-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
513 ; VEC-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
514 ; VEC-NEXT:    store i8 [[TMP5]], i8* [[TMP2]], align 1
515 ; VEC-NEXT:    br label [[FOR_INC]]
516 ; VEC:       for.inc:
517 ; VEC-NEXT:    [[TMP6]] = add nuw nsw i64 [[TMP0]], 1
518 ; VEC-NEXT:    [[TMP7]] = add i64 [[TMP1]], -1
519 ; VEC-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
520 ; VEC-NEXT:    br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop !5
521 ; VEC:       for.end:
522 ; VEC-NEXT:    ret void
; Scalar input IR: a loop with two inductions (%tmp0 counting up, %tmp1
; counting down to 0 as the trip test) where the uniform condition %c
; guards an i8 load, zext to i32, trunc back to i8, and store to the same
; address. Undef bounds/pointers are reducer residue, kept intentionally.
524 entry:
525   br label %for.body
527 for.body:
528   %tmp0 = phi i64 [ %tmp6, %for.inc ], [ 0, %entry ]
529   %tmp1 = phi i64 [ %tmp7, %for.inc ], [ undef, %entry ]
530   %tmp2 = getelementptr i8, i8* undef, i64 %tmp0
531   %tmp3 = load i8, i8* %tmp2, align 1
532   br i1 %c, label %if.then, label %for.inc
534 if.then:
535   %tmp4 = zext i8 %tmp3 to i32
536   %tmp5 = trunc i32 %tmp4 to i8
537   store i8 %tmp5, i8* %tmp2, align 1
538   br label %for.inc
540 for.inc:
541   %tmp6 = add nuw nsw i64 %tmp0, 1
542   %tmp7 = add i64 %tmp1, -1
543   %tmp8 = icmp eq i64 %tmp7, 0
544   br i1 %tmp8, label %for.end, label %for.body
546 for.end:
547   ret void