3 ; RUN: opt -passes=loop-vectorize -force-vector-width=1 -force-vector-interleave=2 -debug -disable-output %s 2>&1 | FileCheck --check-prefix=DBG %s
4 ; RUN: opt -passes=loop-vectorize -force-vector-width=1 -force-vector-interleave=2 -S %s | FileCheck %s
6 ; DBG-LABEL: 'test_scalarize_call'
7 ; DBG: VPlan 'Initial VPlan for VF={1},UF>=1' {
8 ; DBG-NEXT: Live-in vp<[[VFxUF:%.+]]> = VF * UF
9 ; DBG-NEXT: Live-in vp<[[VEC_TC:%.+]]> = vector-trip-count
10 ; DBG-NEXT: vp<[[TC:%.+]]> = original trip-count
12 ; DBG-NEXT: ir-bb<entry>:
13 ; DBG-NEXT: EMIT vp<[[TC]]> = EXPAND SCEV (1000 + (-1 * %start))
14 ; DBG-NEXT: No successors
16 ; DBG-NEXT: vector.ph:
17 ; DBG-NEXT: Successor(s): vector loop
19 ; DBG-NEXT: <x1> vector loop: {
20 ; DBG-NEXT: vector.body:
21 ; DBG-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
22 ; DBG-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<%start> + vp<[[CAN_IV]]> * ir<1>
23 ; DBG-NEXT: vp<[[IV_STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<1>
24 ; DBG-NEXT: CLONE ir<%min> = call @llvm.smin.i32(vp<[[IV_STEPS]]>, ir<65535>)
25 ; DBG-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%dst>, vp<[[IV_STEPS]]>
26 ; DBG-NEXT: CLONE store ir<%min>, ir<%arrayidx>
27 ; DBG-NEXT: EMIT vp<[[INC:%.+]]> = add nuw vp<[[CAN_IV]]>, vp<[[VFxUF]]>
28 ; DBG-NEXT: EMIT branch-on-count vp<[[INC]]>, vp<[[VEC_TC]]>
29 ; DBG-NEXT: No successors
; Test: a call to @llvm.smin.i32 is scalarized under VF=1/IC=2 — each of the
; two interleaved lanes gets its own cloned call, GEP and store.
32 define void @test_scalarize_call(i32 %start, ptr %dst) {
33 ; CHECK-LABEL: @test_scalarize_call(
35 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
36 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 %start, [[INDEX]]
37 ; CHECK-NEXT: [[INDUCTION:%.*]] = add i32 [[OFFSET_IDX]], 0
38 ; CHECK-NEXT: [[INDUCTION1:%.*]] = add i32 [[OFFSET_IDX]], 1
39 ; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.smin.i32(i32 [[INDUCTION]], i32 65535)
40 ; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @llvm.smin.i32(i32 [[INDUCTION1]], i32 65535)
41 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i32 [[INDUCTION]]
42 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[INDUCTION1]]
43 ; CHECK-NEXT: store i32 [[TMP1]], ptr [[TMP3]], align 8
44 ; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP4]], align 8
45 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
46 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], %n.vec
47 ; CHECK-NEXT: br i1 [[TMP5]], label %middle.block, label %vector.body
48 ; CHECK: middle.block:
; Scalar source loop: %iv counts from %start up to 1000; each iteration stores
; smin(%iv, 65535) to %dst[%iv]. (Some lines of the original file — e.g. the
; entry/exit blocks — are elided in this view.)
54 %iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
55 %min = tail call i32 @llvm.smin.i32(i32 %iv, i32 65535)
56 %arrayidx = getelementptr inbounds i32 , ptr %dst, i32 %iv
57 store i32 %min, ptr %arrayidx, align 8
58 %iv.next = add nsw i32 %iv, 1
59 %tobool.not = icmp eq i32 %iv.next, 1000
60 br i1 %tobool.not, label %exit, label %loop
66 declare i32 @llvm.smin.i32(i32, i32)
69 ; DBG-LABEL: 'test_scalarize_with_branch_cond'
71 ; DBG: Live-in vp<[[VFxUF:%.+]]> = VF * UF
72 ; DBG-NEXT: Live-in vp<[[VEC_TC:%.+]]> = vector-trip-count
73 ; DBG-NEXT: Live-in ir<1000> = original trip-count
75 ; DBG-NEXT: vector.ph:
76 ; DBG-NEXT: Successor(s): vector loop
78 ; DBG-NEXT: <x1> vector loop: {
79 ; DBG-NEXT: vector.body:
80 ; DBG-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
81 ; DBG-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<false> + vp<[[CAN_IV]]> * ir<true>
82 ; DBG-NEXT: vp<[[STEPS1:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<true>
83 ; DBG-NEXT: Successor(s): pred.store
85 ; DBG-NEXT: <xVFxUF> pred.store: {
86 ; DBG-NEXT: pred.store.entry:
87 ; DBG-NEXT: BRANCH-ON-MASK vp<[[STEPS1]]>
88 ; DBG-NEXT: Successor(s): pred.store.if, pred.store.continue
90 ; DBG-NEXT: pred.store.if:
91 ; DBG-NEXT: vp<[[STEPS2:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
92 ; DBG-NEXT: CLONE ir<%gep.src> = getelementptr inbounds ir<%src>, vp<[[STEPS2]]>
93 ; DBG-NEXT: CLONE ir<%l> = load ir<%gep.src>
94 ; DBG-NEXT: CLONE ir<%gep.dst> = getelementptr inbounds ir<%dst>, vp<[[STEPS2]]>
95 ; DBG-NEXT: CLONE store ir<%l>, ir<%gep.dst>
96 ; DBG-NEXT: Successor(s): pred.store.continue
98 ; DBG-NEXT: pred.store.continue:
99 ; DBG-NEXT: No successors
101 ; DBG-NEXT: Successor(s): cond.false.1
103 ; DBG-NEXT: cond.false.1:
104 ; DBG-NEXT: EMIT vp<[[CAN_IV_INC:%.+]]> = add nuw vp<[[CAN_IV]]>, vp<[[VFxUF]]>
105 ; DBG-NEXT: EMIT branch-on-count vp<[[CAN_IV_INC]]>, vp<[[VEC_TC]]>
106 ; DBG-NEXT: No successors
108 ; DBG-NEXT: Successor(s): middle.block
110 ; DBG-NEXT: middle.block:
111 ; DBG-NEXT: EMIT vp<[[CMP:%.+]]> = icmp eq ir<1000>, vp<[[VEC_TC]]>
112 ; DBG-NEXT: EMIT branch-on-cond vp<[[CMP]]>
113 ; DBG-NEXT: Successor(s): ir-bb<exit>, scalar.ph
115 ; DBG-NEXT: ir-bb<exit>:
116 ; DBG-NEXT: No successors
118 ; DBG-NEXT: scalar.ph:
119 ; DBG-NEXT: Successor(s): ir-bb<loop.header>
121 ; DBG-NEXT: ir-bb<loop.header>:
122 ; DBG-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
123 ; DBG-NEXT: IR %d = phi i1 [ false, %entry ], [ %d.next, %loop.latch ]
124 ; DBG-NEXT: IR %d.next = xor i1 %d, true
125 ; DBG-NEXT: No successors
; Test: a loop-varying i1 (%d, derived from the induction) guards a store, so
; the load/store pair is sunk into a predicated replicate region; the expected
; IR shows one pred.store.if/continue pair per interleaved lane.
128 define void @test_scalarize_with_branch_cond(ptr %src, ptr %dst) {
129 ; CHECK-LABEL: @test_scalarize_with_branch_cond(
130 ; CHECK: vector.body:
131 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %pred.store.continue5 ]
132 ; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[INDEX]] to i1
133 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i1 false, [[TMP0]]
134 ; CHECK-NEXT: [[INDUCTION:%.*]] = add i1 [[OFFSET_IDX]], false
135 ; CHECK-NEXT: [[INDUCTION3:%.*]] = add i1 [[OFFSET_IDX]], true
136 ; CHECK-NEXT: br i1 [[INDUCTION]], label %pred.store.if, label %pred.store.continue
137 ; CHECK: pred.store.if:
138 ; CHECK-NEXT: [[INDUCTION4:%.*]] = add i64 [[INDEX]], 0
139 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr %src, i64 [[INDUCTION4]]
140 ; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
141 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr %dst, i64 [[INDUCTION4]]
142 ; CHECK-NEXT: store i32 [[TMP4]], ptr [[TMP1]], align 4
143 ; CHECK-NEXT: br label %pred.store.continue
144 ; CHECK: pred.store.continue:
145 ; CHECK-NEXT: br i1 [[INDUCTION3]], label %pred.store.if4, label %pred.store.continue5
146 ; CHECK: pred.store.if4:
147 ; CHECK-NEXT: [[INDUCTION5:%.*]] = add i64 [[INDEX]], 1
148 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr %src, i64 [[INDUCTION5]]
149 ; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
150 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr %dst, i64 [[INDUCTION5]]
151 ; CHECK-NEXT: store i32 [[TMP7]], ptr [[TMP2]], align 4
152 ; CHECK-NEXT: br label %pred.store.continue5
153 ; CHECK: pred.store.continue5:
154 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
155 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
156 ; CHECK-NEXT: br i1 [[TMP9]], label %middle.block, label %vector.body
157 ; CHECK: middle.block:
160 br label %loop.header
; Scalar source loop: %d toggles false/true each iteration; when %d is true the
; latch is taken via cond.false (elided in this view), copying %src[%iv] to
; %dst[%iv]. Runs until %iv.next == 1000.
163 %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
164 %d = phi i1 [ false, %entry ], [ %d.next, %loop.latch ]
165 %d.next = xor i1 %d, true
166 br i1 %d, label %cond.false, label %loop.latch
169 %gep.src = getelementptr inbounds i32, ptr %src, i64 %iv
170 %gep.dst = getelementptr inbounds i32, ptr %dst, i64 %iv
171 %l = load i32, ptr %gep.src, align 4
172 store i32 %l, ptr %gep.dst
176 %iv.next = add nsw i64 %iv, 1
177 %ec = icmp eq i64 %iv.next, 1000
178 br i1 %ec, label %exit, label %loop.header
184 ; Make sure the widened induction gets replaced by scalar-steps for plans
185 ; including the scalar VF, if it is used in first-order recurrences.
187 ; DBG-LABEL: 'first_order_recurrence_using_induction'
188 ; DBG: VPlan 'Initial VPlan for VF={1},UF>=1' {
189 ; DBG-NEXT: Live-in vp<[[VFxUF:%.+]]> = VF * UF
190 ; DBG-NEXT: Live-in vp<[[VTC:%.+]]> = vector-trip-count
191 ; DBG-NEXT: vp<[[TC:%.+]]> = original trip-count
193 ; DBG-NEXT: ir-bb<entry>:
194 ; DBG-NEXT: EMIT vp<[[TC]]> = EXPAND SCEV (zext i32 (1 smax %n) to i64)
195 ; DBG-NEXT: No successors
197 ; DBG-NEXT: vector.ph:
198 ; DBG-NEXT: SCALAR-CAST vp<[[CAST:%.+]]> = trunc ir<1> to i32
199 ; DBG-NEXT: Successor(s): vector loop
201 ; DBG-NEXT: <x1> vector loop: {
202 ; DBG-NEXT: vector.body:
203 ; DBG-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
204 ; DBG-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for> = phi ir<0>, vp<[[SCALAR_STEPS:.+]]>
205 ; DBG-NEXT: SCALAR-CAST vp<[[TRUNC_IV:%.+]]> = trunc vp<[[CAN_IV]]> to i32
206 ; DBG-NEXT: vp<[[SCALAR_STEPS]]> = SCALAR-STEPS vp<[[TRUNC_IV]]>, vp<[[CAST]]>
207 ; DBG-NEXT: EMIT vp<[[SPLICE:%.+]]> = first-order splice ir<%for>, vp<[[SCALAR_STEPS]]>
208 ; DBG-NEXT: CLONE store vp<[[SPLICE]]>, ir<%dst>
209 ; DBG-NEXT: EMIT vp<[[IV_INC:%.+]]> = add nuw vp<[[CAN_IV]]>, vp<[[VFxUF]]>
210 ; DBG-NEXT: EMIT branch-on-count vp<[[IV_INC]]>, vp<[[VTC]]>
211 ; DBG-NEXT: No successors
213 ; DBG-NEXT: Successor(s): middle.block
215 ; DBG-NEXT: middle.block:
216 ; DBG-NEXT: EMIT vp<[[RESUME_1:%.+]]> = extract-from-end vp<[[SCALAR_STEPS]]>, ir<1>
217 ; DBG-NEXT: EMIT vp<[[CMP:%.+]]> = icmp eq vp<[[TC]]>, vp<[[VTC]]>
218 ; DBG-NEXT: EMIT branch-on-cond vp<[[CMP]]>
219 ; DBG-NEXT: Successor(s): ir-bb<exit>, scalar.ph
221 ; DBG-NEXT: ir-bb<exit>:
222 ; DBG-NEXT: No successors
224 ; DBG-NEXT: scalar.ph:
225 ; DBG-NEXT: EMIT vp<[[RESUME_P:%.*]]> = resume-phi vp<[[RESUME_1]]>, ir<0>
226 ; DBG-NEXT: Successor(s): ir-bb<loop>
228 ; DBG-NEXT: ir-bb<loop>:
229 ; DBG-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
230 ; DBG-NEXT: IR %for = phi i32 [ 0, %entry ], [ %iv.trunc, %loop ] (extra operand: vp<[[RESUME_P]]> from scalar.ph)
231 ; DBG: IR %ec = icmp slt i32 %iv.next.trunc, %n
232 ; DBG-NEXT: No successors
; Test: the first-order recurrence %for is fed by the truncated induction, so
; the widened induction must be replaced by scalar steps (see the DBG plan
; above); the recurrence phi takes the last lane's value per iteration.
235 define void @first_order_recurrence_using_induction(i32 %n, ptr %dst) {
236 ; CHECK-LABEL: @first_order_recurrence_using_induction(
237 ; CHECK: vector.body:
238 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
239 ; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi i32 [ 0, %vector.ph ], [ [[INDUCTION1:%.*]], %vector.body ]
240 ; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[INDEX]] to i32
241 ; CHECK-NEXT: [[INDUCTION:%.*]] = add i32 [[TMP3]], 0
242 ; CHECK-NEXT: [[INDUCTION1]] = add i32 [[TMP3]], 1
243 ; CHECK-NEXT: store i32 [[INDUCTION]], ptr [[DST]], align 4
244 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
245 ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], %n.vec
246 ; CHECK-NEXT: br i1 [[TMP4]], label %middle.block, label %vector.body
247 ; CHECK: middle.block:
; Scalar source loop: %for carries the previous iteration's trunc(%iv); each
; iteration stores %for to %dst and runs while trunc(%iv.next) < %n (signed).
253 %iv = phi i64 [ 0, %entry ],[ %iv.next, %loop ]
254 %for = phi i32 [ 0, %entry ], [ %iv.trunc, %loop ]
255 %iv.trunc = trunc i64 %iv to i32
256 store i32 %for, ptr %dst
257 %iv.next = add nuw nsw i64 %iv, 1
258 %iv.next.trunc = trunc i64 %iv.next to i32
259 %ec = icmp slt i32 %iv.next.trunc, %n
260 br i1 %ec, label %loop, label %exit
; Test: an integer reduction whose update masks with 65535 each iteration;
; interleaving by 2 produces two accumulators (VEC_PHI/VEC_PHI1) combined by a
; single add in middle.block, with trip count 9998 (next multiple of 2 below
; the scalar count).
266 define i16 @reduction_with_casts() {
267 ; CHECK-LABEL: define i16 @reduction_with_casts() {
268 ; CHECK: vector.body:
269 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.+]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY:%.+]] ]
270 ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
271 ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
272 ; CHECK-NEXT: [[TMP0:%.*]] = and i32 [[VEC_PHI]], 65535
273 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[VEC_PHI1]], 65535
274 ; CHECK-NEXT: [[TMP2]] = add i32 [[TMP0]], 1
275 ; CHECK-NEXT: [[TMP3]] = add i32 [[TMP1]], 1
276 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
277 ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], 9998
278 ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]]
279 ; CHECK: middle.block:
280 ; CHECK-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP3]], [[TMP2]]
281 ; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label %scalar.ph
; Scalar source loop: %count.0.in1 accumulates (%prev & 65535) + 1 while the
; i16 IV counts 1..9999 (exits when %iv.next == 10000).
287 %count.0.in1 = phi i32 [ 0, %entry ], [ %add, %loop ]
288 %iv = phi i16 [ 1, %entry ], [ %iv.next, %loop ]
289 %conv1 = and i32 %count.0.in1, 65535
290 %add = add nuw nsw i32 %conv1, 1
291 %iv.next = add i16 %iv, 1
292 %cmp = icmp eq i16 %iv.next, 10000
293 br i1 %cmp, label %exit, label %loop
; Exit block (partially elided): the final accumulator is truncated to the
; i16 return value.
296 %add.lcssa = phi i32 [ %add, %loop ]
297 %count.0 = trunc i32 %add.lcssa to i16
; Test: a ptrtoint/add/inttoptr chain on a loaded pointer is scalarized; the
; expected IR only pins lane 1 ([[TMP1]] = INDEX + 1) of the interleaved pair.
301 define void @scalarize_ptrtoint(ptr %src, ptr %dst) {
302 ; CHECK: vector.body:
303 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
304 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
305 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr ptr, ptr %src, i64 [[TMP1]]
306 ; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP3]], align 8
307 ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP5]] to i64
308 ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP7]], 10
309 ; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP9]] to ptr
310 ; CHECK-NEXT: store ptr [[TMP11]], ptr %dst, align 8
311 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
312 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 0
313 ; CHECK-NEXT: br i1 [[TMP12]], label %middle.block, label %vector.body
; Scalar source loop: loads a pointer from %src[%iv], adds 10 to its integer
; value, and stores the result pointer to %dst. Note the exit compare is
; against 0, i.e. the i64 IV wraps before exiting.
319 %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
320 %gep = getelementptr ptr, ptr %src, i64 %iv
321 %l = load ptr, ptr %gep, align 8
322 %cast = ptrtoint ptr %l to i64
323 %add = add i64 %cast, 10
324 %cast.2 = inttoptr i64 %add to ptr
325 store ptr %cast.2, ptr %dst, align 8
326 %iv.next = add i64 %iv, 1
327 %ec = icmp eq i64 %iv.next, 0
328 br i1 %ec, label %exit, label %loop
; Test for PR76986: an sext-then-trunc chain feeding an sdiv must be handled
; correctly when only interleaving (VF=1, IC=2) — each lane keeps its own
; load/sext/trunc/sdiv/store sequence. (The end of this function is outside
; the visible portion of the file.)
334 define void @pr76986_trunc_sext_interleaving_only(i16 %arg, ptr noalias %src, ptr noalias %dst) {
335 ; CHECK-LABEL: define void @pr76986_trunc_sext_interleaving_only(
336 ; CHECK: vector.body:
337 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
338 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
339 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
340 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr %src, i64 [[TMP0]]
341 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr %src, i64 [[TMP1]]
342 ; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[TMP2]], align 1
343 ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP3]], align 1
344 ; CHECK-NEXT: [[TMP6:%.*]] = sext i8 [[TMP4]] to i32
345 ; CHECK-NEXT: [[TMP7:%.*]] = sext i8 [[TMP5]] to i32
346 ; CHECK-NEXT: [[TMP8:%.*]] = trunc i32 [[TMP6]] to i16
347 ; CHECK-NEXT: [[TMP9:%.*]] = trunc i32 [[TMP7]] to i16
348 ; CHECK-NEXT: [[TMP10:%.*]] = sdiv i16 [[TMP8]], %arg
349 ; CHECK-NEXT: [[TMP11:%.*]] = sdiv i16 [[TMP9]], %arg
350 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i16, ptr %dst, i64 [[TMP0]]
351 ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i16, ptr %dst, i64 [[TMP1]]
352 ; CHECK-NEXT: store i16 [[TMP10]], ptr [[TMP12]], align 2
353 ; CHECK-NEXT: store i16 [[TMP11]], ptr [[TMP13]], align 2
354 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
355 ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 14934
356 ; CHECK-NEXT: br i1 [[TMP14]], label %middle.block, label %vector.body
; Scalar source loop: dst[iv] = (i16)(sext i8 src[iv]) / %arg, iterating while
; %iv is below 14934 (icmp ult against 14933 on the pre-increment value).
362 %iv = phi i64 [ 0, %bb ], [ %iv.next, %loop ]
363 %gep.src = getelementptr inbounds i8, ptr %src, i64 %iv
364 %l = load i8, ptr %gep.src
365 %sext = sext i8 %l to i32
366 %trunc = trunc i32 %sext to i16
367 %sdiv = sdiv i16 %trunc, %arg
368 %gep.dst = getelementptr inbounds i16, ptr %dst, i64 %iv
369 store i16 %sdiv, ptr %gep.dst
370 %iv.next = add i64 %iv, 1
371 %icmp = icmp ult i64 %iv, 14933
372 br i1 %icmp, label %loop, label %exit