; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
; RUN: opt -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S %s | FileCheck %s

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
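
; %f is shifted right by the constant 18, truncated to i8 and stored on every
; iteration (see PR47927 in the test name). The checks show the loop-invariant
; lshr widened to <4 x i32> with a splat of %f and a constant shift amount.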
define void @test_pr47927_lshr_const_shift_ops(ptr %dst, i32 %f) {
; CHECK-LABEL: define void @test_pr47927_lshr_const_shift_ops
; CHECK-SAME: (ptr [[DST:%.*]], i32 [[F:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[F]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX]] to i8
; CHECK-NEXT:    [[TMP0:%.*]] = add i8 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = lshr <4 x i32> [[BROADCAST_SPLAT]], <i32 18, i32 18, i32 18, i32 18>
; CHECK-NEXT:    [[TMP2:%.*]] = trunc <4 x i32> [[TMP1]] to <4 x i8>
; CHECK-NEXT:    [[TMP3:%.*]] = zext i8 [[TMP0]] to i64
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0
; CHECK-NEXT:    store <4 x i8> [[TMP2]], ptr [[TMP5]], align 8
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
; CHECK-NEXT:    br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i8 [ 100, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[IV:%.*]] = phi i8 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[L:%.*]] = lshr i32 [[F]], 18
; CHECK-NEXT:    [[L_T:%.*]] = trunc i32 [[L]] to i8
; CHECK-NEXT:    [[IV_EXT:%.*]] = zext i8 [[IV]] to i64
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]]
; CHECK-NEXT:    store i8 [[L_T]], ptr [[GEP]], align 8
; CHECK-NEXT:    [[IV_NEXT]] = add i8 [[IV]], 1
; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32
; CHECK-NEXT:    [[C:%.*]] = icmp ne i32 [[CONV]], 100
; CHECK-NEXT:    br i1 [[C]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %iv = phi i8 [ 0, %entry ], [ %iv.next, %loop ]
  %l = lshr i32 %f, 18
  %l.t = trunc i32 %l to i8
  %iv.ext = zext i8 %iv to i64
  %gep = getelementptr inbounds i8, ptr %dst, i64 %iv.ext
  store i8 %l.t, ptr %gep, align 8
  %iv.next = add i8 %iv, 1
  %conv = zext i8 %iv.next to i32
  %c = icmp ne i32 %conv, 100
  br i1 %c, label %loop, label %exit

exit:
  ret void
}
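
; Same pattern as above, but using shl as the loop-invariant shift.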
define void @test_shl_const_shift_ops(ptr %dst, i32 %f) {
; CHECK-LABEL: define void @test_shl_const_shift_ops
; CHECK-SAME: (ptr [[DST:%.*]], i32 [[F:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[F]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX]] to i8
; CHECK-NEXT:    [[TMP0:%.*]] = add i8 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = shl <4 x i32> [[BROADCAST_SPLAT]], <i32 18, i32 18, i32 18, i32 18>
; CHECK-NEXT:    [[TMP2:%.*]] = trunc <4 x i32> [[TMP1]] to <4 x i8>
; CHECK-NEXT:    [[TMP3:%.*]] = zext i8 [[TMP0]] to i64
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0
; CHECK-NEXT:    store <4 x i8> [[TMP2]], ptr [[TMP5]], align 8
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
; CHECK-NEXT:    br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i8 [ 100, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[IV:%.*]] = phi i8 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[L:%.*]] = shl i32 [[F]], 18
; CHECK-NEXT:    [[L_T:%.*]] = trunc i32 [[L]] to i8
; CHECK-NEXT:    [[IV_EXT:%.*]] = zext i8 [[IV]] to i64
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]]
; CHECK-NEXT:    store i8 [[L_T]], ptr [[GEP]], align 8
; CHECK-NEXT:    [[IV_NEXT]] = add i8 [[IV]], 1
; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32
; CHECK-NEXT:    [[C:%.*]] = icmp ne i32 [[CONV]], 100
; CHECK-NEXT:    br i1 [[C]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %iv = phi i8 [ 0, %entry ], [ %iv.next, %loop ]
  %l = shl i32 %f, 18
  %l.t = trunc i32 %l to i8
  %iv.ext = zext i8 %iv to i64
  %gep = getelementptr inbounds i8, ptr %dst, i64 %iv.ext
  store i8 %l.t, ptr %gep, align 8
  %iv.next = add i8 %iv, 1
  %conv = zext i8 %iv.next to i32
  %c = icmp ne i32 %conv, 100
  br i1 %c, label %loop, label %exit

exit:
  ret void
}
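
; Same pattern as above, but using ashr as the loop-invariant shift.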
define void @test_ashr_const_shift_ops(ptr %dst, i32 %f) {
; CHECK-LABEL: define void @test_ashr_const_shift_ops
; CHECK-SAME: (ptr [[DST:%.*]], i32 [[F:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[F]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX]] to i8
; CHECK-NEXT:    [[TMP0:%.*]] = add i8 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = ashr <4 x i32> [[BROADCAST_SPLAT]], <i32 18, i32 18, i32 18, i32 18>
; CHECK-NEXT:    [[TMP2:%.*]] = trunc <4 x i32> [[TMP1]] to <4 x i8>
; CHECK-NEXT:    [[TMP3:%.*]] = zext i8 [[TMP0]] to i64
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0
; CHECK-NEXT:    store <4 x i8> [[TMP2]], ptr [[TMP5]], align 8
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
; CHECK-NEXT:    br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i8 [ 100, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[IV:%.*]] = phi i8 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[L:%.*]] = ashr i32 [[F]], 18
; CHECK-NEXT:    [[L_T:%.*]] = trunc i32 [[L]] to i8
; CHECK-NEXT:    [[IV_EXT:%.*]] = zext i8 [[IV]] to i64
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]]
; CHECK-NEXT:    store i8 [[L_T]], ptr [[GEP]], align 8
; CHECK-NEXT:    [[IV_NEXT]] = add i8 [[IV]], 1
; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32
; CHECK-NEXT:    [[C:%.*]] = icmp ne i32 [[CONV]], 100
; CHECK-NEXT:    br i1 [[C]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %iv = phi i8 [ 0, %entry ], [ %iv.next, %loop ]
  %l = ashr i32 %f, 18
  %l.t = trunc i32 %l to i8
  %iv.ext = zext i8 %iv to i64
  %gep = getelementptr inbounds i8, ptr %dst, i64 %iv.ext
  store i8 %l.t, ptr %gep, align 8
  %iv.next = add i8 %iv, 1
  %conv = zext i8 %iv.next to i32
  %c = icmp ne i32 %conv, 100
  br i1 %c, label %loop, label %exit

exit:
  ret void
}
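
; Here the constant 19 is the shifted operand and the loaded, zero-extended
; value is the shift amount.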
define void @test_shl_const_shifted_op(ptr %dst, i32 %f) {
; CHECK-LABEL: define void @test_shl_const_shifted_op
; CHECK-SAME: (ptr [[DST:%.*]], i32 [[F:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX]] to i8
; CHECK-NEXT:    [[TMP0:%.*]] = add i8 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[TMP0]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1
; CHECK-NEXT:    [[TMP4:%.*]] = zext <4 x i8> [[WIDE_LOAD]] to <4 x i32>
; CHECK-NEXT:    [[TMP5:%.*]] = shl <4 x i32> <i32 19, i32 19, i32 19, i32 19>, [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = trunc <4 x i32> [[TMP5]] to <4 x i8>
; CHECK-NEXT:    store <4 x i8> [[TMP6]], ptr [[TMP3]], align 8
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
; CHECK-NEXT:    br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i8 [ 100, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[IV:%.*]] = phi i8 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV_EXT:%.*]] = zext i8 [[IV]] to i64
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]]
; CHECK-NEXT:    [[LV:%.*]] = load i8, ptr [[GEP]], align 1
; CHECK-NEXT:    [[ZEXT:%.*]] = zext i8 [[LV]] to i32
; CHECK-NEXT:    [[L:%.*]] = shl i32 19, [[ZEXT]]
; CHECK-NEXT:    [[L_T:%.*]] = trunc i32 [[L]] to i8
; CHECK-NEXT:    store i8 [[L_T]], ptr [[GEP]], align 8
; CHECK-NEXT:    [[IV_NEXT]] = add i8 [[IV]], 1
; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32
; CHECK-NEXT:    [[C:%.*]] = icmp ne i32 [[CONV]], 100
; CHECK-NEXT:    br i1 [[C]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %iv = phi i8 [ 0, %entry ], [ %iv.next, %loop ]
  %iv.ext = zext i8 %iv to i64
  %gep = getelementptr inbounds i8, ptr %dst, i64 %iv.ext
  %lv = load i8, ptr %gep
  %zext = zext i8 %lv to i32
  %l = shl i32 19, %zext
  %l.t = trunc i32 %l to i8
  store i8 %l.t, ptr %gep, align 8
  %iv.next = add i8 %iv, 1
  %conv = zext i8 %iv.next to i32
  %c = icmp ne i32 %conv, 100
  br i1 %c, label %loop, label %exit

exit:
  ret void
}
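
; A loaded i8 is zero-extended and shifted right by 18. Since a shift by 18
; cannot be performed in a type narrower than i32, the checks show the vector
; lshr kept at <4 x i32>.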
define void @test_lshr_by_18(ptr %A) {
; CHECK-LABEL: define void @test_lshr_by_18
; CHECK-SAME: (ptr [[A:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX]] to i8
; CHECK-NEXT:    [[TMP0:%.*]] = add i8 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[TMP0]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1
; CHECK-NEXT:    [[TMP4:%.*]] = zext <4 x i8> [[WIDE_LOAD]] to <4 x i32>
; CHECK-NEXT:    [[TMP5:%.*]] = lshr <4 x i32> [[TMP4]], <i32 18, i32 18, i32 18, i32 18>
; CHECK-NEXT:    [[TMP6:%.*]] = trunc <4 x i32> [[TMP5]] to <4 x i8>
; CHECK-NEXT:    store <4 x i8> [[TMP6]], ptr [[TMP3]], align 8
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
; CHECK-NEXT:    br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i8 [ 100, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[IV:%.*]] = phi i8 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV_EXT:%.*]] = zext i8 [[IV]] to i64
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV_EXT]]
; CHECK-NEXT:    [[LV:%.*]] = load i8, ptr [[GEP]], align 1
; CHECK-NEXT:    [[LV_EXT:%.*]] = zext i8 [[LV]] to i32
; CHECK-NEXT:    [[L:%.*]] = lshr i32 [[LV_EXT]], 18
; CHECK-NEXT:    [[L_T:%.*]] = trunc i32 [[L]] to i8
; CHECK-NEXT:    store i8 [[L_T]], ptr [[GEP]], align 8
; CHECK-NEXT:    [[IV_NEXT]] = add i8 [[IV]], 1
; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32
; CHECK-NEXT:    [[C:%.*]] = icmp ne i32 [[CONV]], 100
; CHECK-NEXT:    br i1 [[C]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %iv = phi i8 [ 0, %entry ], [ %iv.next, %loop ]
  %iv.ext = zext i8 %iv to i64
  %gep = getelementptr inbounds i8, ptr %A, i64 %iv.ext
  %lv = load i8, ptr %gep
  %lv.ext = zext i8 %lv to i32
  %l = lshr i32 %lv.ext, 18
  %l.t = trunc i32 %l to i8
  store i8 %l.t, ptr %gep, align 8
  %iv.next = add i8 %iv, 1
  %conv = zext i8 %iv.next to i32
  %c = icmp ne i32 %conv, 100
  br i1 %c, label %loop, label %exit

exit:
  ret void
}
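
; Same as above with a shift amount of 4; the checks show the lshr narrowed
; to <4 x i16>, surrounded by the remaining zext/trunc chain.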
define void @test_lshr_by_4(ptr %A) {
; CHECK-LABEL: define void @test_lshr_by_4
; CHECK-SAME: (ptr [[A:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX]] to i8
; CHECK-NEXT:    [[TMP0:%.*]] = add i8 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[TMP0]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1
; CHECK-NEXT:    [[TMP4:%.*]] = zext <4 x i8> [[WIDE_LOAD]] to <4 x i16>
; CHECK-NEXT:    [[TMP5:%.*]] = zext <4 x i16> [[TMP4]] to <4 x i32>
; CHECK-NEXT:    [[TMP6:%.*]] = trunc <4 x i32> [[TMP5]] to <4 x i16>
; CHECK-NEXT:    [[TMP7:%.*]] = lshr <4 x i16> [[TMP6]], <i16 4, i16 4, i16 4, i16 4>
; CHECK-NEXT:    [[TMP8:%.*]] = zext <4 x i16> [[TMP7]] to <4 x i32>
; CHECK-NEXT:    [[TMP9:%.*]] = trunc <4 x i32> [[TMP8]] to <4 x i16>
; CHECK-NEXT:    [[TMP10:%.*]] = trunc <4 x i16> [[TMP9]] to <4 x i8>
; CHECK-NEXT:    store <4 x i8> [[TMP10]], ptr [[TMP3]], align 8
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
; CHECK-NEXT:    br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i8 [ 100, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[IV:%.*]] = phi i8 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV_EXT:%.*]] = zext i8 [[IV]] to i64
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV_EXT]]
; CHECK-NEXT:    [[LV:%.*]] = load i8, ptr [[GEP]], align 1
; CHECK-NEXT:    [[LV_EXT:%.*]] = zext i8 [[LV]] to i32
; CHECK-NEXT:    [[L:%.*]] = lshr i32 [[LV_EXT]], 4
; CHECK-NEXT:    [[L_T:%.*]] = trunc i32 [[L]] to i8
; CHECK-NEXT:    store i8 [[L_T]], ptr [[GEP]], align 8
; CHECK-NEXT:    [[IV_NEXT]] = add i8 [[IV]], 1
; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32
; CHECK-NEXT:    [[C:%.*]] = icmp ne i32 [[CONV]], 100
; CHECK-NEXT:    br i1 [[C]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %iv = phi i8 [ 0, %entry ], [ %iv.next, %loop ]
  %iv.ext = zext i8 %iv to i64
  %gep = getelementptr inbounds i8, ptr %A, i64 %iv.ext
  %lv = load i8, ptr %gep
  %lv.ext = zext i8 %lv to i32
  %l = lshr i32 %lv.ext, 4
  %l.t = trunc i32 %l to i8
  store i8 %l.t, ptr %gep, align 8
  %iv.next = add i8 %iv, 1
  %conv = zext i8 %iv.next to i32
  %c = icmp ne i32 %conv, 100
  br i1 %c, label %loop, label %exit

exit:
  ret void
}
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]}
; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]}
; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META2]], [[META1]]}