1 ; RUN: opt -mtriple=thumbv7-unknown-linux-gnueabihf -arm-parallel-dsp -dce %s -S -o - | FileCheck %s
3 ; CHECK-LABEL: first_mul_invalid
4 ; CHECK: [[ADDR_IN_MINUS_1:%[^ ]+]] = getelementptr inbounds i16, i16* %in, i32 -1
5 ; CHECK: [[LD_IN_MINUS_1:%[^ ]+]] = load i16, i16* [[ADDR_IN_MINUS_1]], align 2
6 ; CHECK: [[IN_MINUS_1:%[^ ]+]] = sext i16 [[LD_IN_MINUS_1]] to i32
7 ; CHECK: [[ADDR_B_PLUS_1:%[^ ]+]] = getelementptr inbounds i16, i16* %b, i32 1
8 ; CHECK: [[LD_B_PLUS_1:%[^ ]+]] = load i16, i16* [[ADDR_B_PLUS_1]], align 2
9 ; CHECK: [[B_PLUS_1:%[^ ]+]] = sext i16 [[LD_B_PLUS_1]] to i32
10 ; CHECK: [[MUL0:%[^ ]+]] = mul nsw i32 [[B_PLUS_1]], [[IN_MINUS_1]]
11 ; CHECK: [[ADD0:%[^ ]+]] = add i32 [[MUL0]], %call
12 ; CHECK: [[ADDR_IN_MINUS_3:%[^ ]+]] = getelementptr inbounds i16, i16* %in, i32 -3
13 ; CHECK: [[CAST_ADDR_IN_MINUS_3:%[^ ]+]] = bitcast i16* [[ADDR_IN_MINUS_3]] to i32*
14 ; CHECK: [[IN_MINUS_3:%[^ ]+]] = load i32, i32* [[CAST_ADDR_IN_MINUS_3]], align 2
15 ; CHECK: [[ADDR_B_PLUS_2:%[^ ]+]] = getelementptr inbounds i16, i16* %b, i32 2
16 ; CHECK: [[CAST_ADDR_B_PLUS_2:%[^ ]+]] = bitcast i16* [[ADDR_B_PLUS_2]] to i32*
17 ; CHECK: [[B_PLUS_2:%[^ ]+]] = load i32, i32* [[CAST_ADDR_B_PLUS_2]], align 2
18 ; CHECK: [[ADDR_IN_MINUS_5:%[^ ]+]] = getelementptr inbounds i16, i16* %in, i32 -5
19 ; CHECK: [[CAST_ADDR_IN_MINUS_5:%[^ ]+]] = bitcast i16* [[ADDR_IN_MINUS_5]] to i32*
20 ; CHECK: [[IN_MINUS_5:%[^ ]+]] = load i32, i32* [[CAST_ADDR_IN_MINUS_5]], align 2
21 ; CHECK: [[ADDR_B_PLUS_4:%[^ ]+]] = getelementptr inbounds i16, i16* %b, i32 4
22 ; CHECK: [[CAST_ADDR_B_PLUS_4:%[^ ]+]] = bitcast i16* [[ADDR_B_PLUS_4]] to i32*
23 ; CHECK: [[B_PLUS_4:%[^ ]+]] = load i32, i32* [[CAST_ADDR_B_PLUS_4]], align 2
24 ; CHECK: [[ACC:%[^ ]+]] = call i32 @llvm.arm.smladx(i32 [[IN_MINUS_5]], i32 [[B_PLUS_4]], i32 [[ADD0]])
25 ; CHECK: [[RES:%[^ ]+]] = call i32 @llvm.arm.smladx(i32 [[IN_MINUS_3]], i32 [[B_PLUS_2]], i32 [[ACC]])
26 ; CHECK: ret i32 [[RES]]
; The reduction chain here starts with %call (the i32 result of @bar), not a
; mul of two sign-extended i16 loads, so the first candidate "mul" is invalid
; for pairing. The pass must skip it and still combine the remaining
; load/sext/mul chains into smladx calls (checked above).
define i32 @first_mul_invalid(i16* nocapture readonly %in, i16* nocapture readonly %b) {
entry:
  %0 = load i16, i16* %in, align 2
  %conv = sext i16 %0 to i32
  %1 = load i16, i16* %b, align 2
  %conv2 = sext i16 %1 to i32
  %call = tail call i32 @bar(i32 %conv, i32 %conv2)
  %arrayidx3 = getelementptr inbounds i16, i16* %in, i32 -1
  %2 = load i16, i16* %arrayidx3, align 2
  %conv4 = sext i16 %2 to i32
  %arrayidx5 = getelementptr inbounds i16, i16* %b, i32 1
  %3 = load i16, i16* %arrayidx5, align 2
  %conv6 = sext i16 %3 to i32
  %mul = mul nsw i32 %conv6, %conv4
  %add = add i32 %mul, %call
  %arrayidx7 = getelementptr inbounds i16, i16* %in, i32 -2
  %4 = load i16, i16* %arrayidx7, align 2
  %conv8 = sext i16 %4 to i32
  %arrayidx9 = getelementptr inbounds i16, i16* %b, i32 2
  %5 = load i16, i16* %arrayidx9, align 2
  %conv10 = sext i16 %5 to i32
  %mul11 = mul nsw i32 %conv10, %conv8
  %add12 = add i32 %add, %mul11
  %arrayidx13 = getelementptr inbounds i16, i16* %in, i32 -3
  %6 = load i16, i16* %arrayidx13, align 2
  %conv14 = sext i16 %6 to i32
  %arrayidx15 = getelementptr inbounds i16, i16* %b, i32 3
  %7 = load i16, i16* %arrayidx15, align 2
  %conv16 = sext i16 %7 to i32
  %mul17 = mul nsw i32 %conv16, %conv14
  %add18 = add i32 %add12, %mul17
  %arrayidx19 = getelementptr inbounds i16, i16* %in, i32 -4
  %8 = load i16, i16* %arrayidx19, align 2
  %conv20 = sext i16 %8 to i32
  %arrayidx21 = getelementptr inbounds i16, i16* %b, i32 4
  %9 = load i16, i16* %arrayidx21, align 2
  %conv22 = sext i16 %9 to i32
  %mul23 = mul nsw i32 %conv22, %conv20
  %add24 = add i32 %add18, %mul23
  %arrayidx25 = getelementptr inbounds i16, i16* %in, i32 -5
  %10 = load i16, i16* %arrayidx25, align 2
  %conv26 = sext i16 %10 to i32
  %arrayidx27 = getelementptr inbounds i16, i16* %b, i32 5
  %11 = load i16, i16* %arrayidx27, align 2
  %conv28 = sext i16 %11 to i32
  %mul29 = mul nsw i32 %conv28, %conv26
  %add30 = add i32 %add24, %mul29
  ; Restored terminator: the final accumulator is returned (CHECK expects
  ; `ret i32 [[RES]]` after the transform).
  ret i32 %add30
}
77 ; CHECK-LABEL: with_no_acc_input
78 ; CHECK: [[ADDR_IN_MINUS_1:%[^ ]+]] = getelementptr inbounds i16, i16* %in, i32 -1
79 ; CHECK: [[LD_IN_MINUS_1:%[^ ]+]] = load i16, i16* [[ADDR_IN_MINUS_1]], align 2
80 ; CHECK: [[IN_MINUS_1:%[^ ]+]] = sext i16 [[LD_IN_MINUS_1]] to i32
81 ; CHECK: [[ADDR_B_PLUS_1:%[^ ]+]] = getelementptr inbounds i16, i16* %b, i32 1
82 ; CHECK: [[LD_B_PLUS_1:%[^ ]+]] = load i16, i16* [[ADDR_B_PLUS_1]], align 2
83 ; CHECK: [[B_PLUS_1:%[^ ]+]] = sext i16 [[LD_B_PLUS_1]] to i32
84 ; CHECK: [[MUL0:%[^ ]+]] = mul nsw i32 [[B_PLUS_1]], [[IN_MINUS_1]]
85 ; CHECK: [[ADDR_IN_MINUS_3:%[^ ]+]] = getelementptr inbounds i16, i16* %in, i32 -3
86 ; CHECK: [[CAST_ADDR_IN_MINUS_3:%[^ ]+]] = bitcast i16* [[ADDR_IN_MINUS_3]] to i32*
87 ; CHECK: [[IN_MINUS_3:%[^ ]+]] = load i32, i32* [[CAST_ADDR_IN_MINUS_3]], align 2
88 ; CHECK: [[ADDR_B_PLUS_2:%[^ ]+]] = getelementptr inbounds i16, i16* %b, i32 2
89 ; CHECK: [[CAST_ADDR_B_PLUS_2:%[^ ]+]] = bitcast i16* [[ADDR_B_PLUS_2]] to i32*
90 ; CHECK: [[B_PLUS_2:%[^ ]+]] = load i32, i32* [[CAST_ADDR_B_PLUS_2]], align 2
91 ; CHECK: [[ADDR_IN_MINUS_5:%[^ ]+]] = getelementptr inbounds i16, i16* %in, i32 -5
92 ; CHECK: [[CAST_ADDR_IN_MINUS_5:%[^ ]+]] = bitcast i16* [[ADDR_IN_MINUS_5]] to i32*
93 ; CHECK: [[IN_MINUS_5:%[^ ]+]] = load i32, i32* [[CAST_ADDR_IN_MINUS_5]], align 2
94 ; CHECK: [[ADDR_B_PLUS_4:%[^ ]+]] = getelementptr inbounds i16, i16* %b, i32 4
95 ; CHECK: [[CAST_ADDR_B_PLUS_4:%[^ ]+]] = bitcast i16* [[ADDR_B_PLUS_4]] to i32*
96 ; CHECK: [[B_PLUS_4:%[^ ]+]] = load i32, i32* [[CAST_ADDR_B_PLUS_4]], align 2
97 ; CHECK: [[ACC:%[^ ]+]] = call i32 @llvm.arm.smladx(i32 [[IN_MINUS_5]], i32 [[B_PLUS_4]], i32 [[MUL0]])
98 ; CHECK: [[RES:%[^ ]+]] = call i32 @llvm.arm.smladx(i32 [[IN_MINUS_3]], i32 [[B_PLUS_2]], i32 [[ACC]])
99 ; CHECK: ret i32 [[RES]]
; Here the reduction has no incoming accumulator value: the chain begins with
; an add of two muls (%add12 = %mul + %mul11). The pass must still form the
; smladx chain, using the leftover unpaired mul (%mul) as the initial
; accumulator operand (checked above).
define i32 @with_no_acc_input(i16* nocapture readonly %in, i16* nocapture readonly %b) {
entry:
  %arrayidx3 = getelementptr inbounds i16, i16* %in, i32 -1
  %ld.2 = load i16, i16* %arrayidx3, align 2
  %conv4 = sext i16 %ld.2 to i32
  %arrayidx5 = getelementptr inbounds i16, i16* %b, i32 1
  %ld.3 = load i16, i16* %arrayidx5, align 2
  %conv6 = sext i16 %ld.3 to i32
  %mul = mul nsw i32 %conv6, %conv4
  %arrayidx7 = getelementptr inbounds i16, i16* %in, i32 -2
  %ld.4 = load i16, i16* %arrayidx7, align 2
  %conv8 = sext i16 %ld.4 to i32
  %arrayidx9 = getelementptr inbounds i16, i16* %b, i32 2
  %ld.5 = load i16, i16* %arrayidx9, align 2
  %conv10 = sext i16 %ld.5 to i32
  %mul11 = mul nsw i32 %conv10, %conv8
  %add12 = add i32 %mul, %mul11
  %arrayidx13 = getelementptr inbounds i16, i16* %in, i32 -3
  %ld.6 = load i16, i16* %arrayidx13, align 2
  %conv14 = sext i16 %ld.6 to i32
  %arrayidx15 = getelementptr inbounds i16, i16* %b, i32 3
  %ld.7 = load i16, i16* %arrayidx15, align 2
  %conv16 = sext i16 %ld.7 to i32
  %mul17 = mul nsw i32 %conv16, %conv14
  %add18 = add i32 %add12, %mul17
  %arrayidx19 = getelementptr inbounds i16, i16* %in, i32 -4
  %ld.8 = load i16, i16* %arrayidx19, align 2
  %conv20 = sext i16 %ld.8 to i32
  %arrayidx21 = getelementptr inbounds i16, i16* %b, i32 4
  %ld.9 = load i16, i16* %arrayidx21, align 2
  %conv22 = sext i16 %ld.9 to i32
  %mul23 = mul nsw i32 %conv22, %conv20
  %add24 = add i32 %add18, %mul23
  %arrayidx25 = getelementptr inbounds i16, i16* %in, i32 -5
  %ld.10 = load i16, i16* %arrayidx25, align 2
  %conv26 = sext i16 %ld.10 to i32
  %arrayidx27 = getelementptr inbounds i16, i16* %b, i32 5
  %ld.11 = load i16, i16* %arrayidx27, align 2
  %conv28 = sext i16 %ld.11 to i32
  %mul29 = mul nsw i32 %conv28, %conv26
  %add30 = add i32 %add24, %mul29
  ; Restored terminator: return the final accumulator (CHECK expects
  ; `ret i32 [[RES]]` after the transform).
  ret i32 %add30
}
144 ; CHECK-LABEL: with_64bit_acc
145 ; CHECK: [[ADDR_IN_MINUS_1:%[^ ]+]] = getelementptr inbounds i16, i16* %in, i32 -1
146 ; CHECK: [[LD_IN_MINUS_1:%[^ ]+]] = load i16, i16* [[ADDR_IN_MINUS_1]], align 2
147 ; CHECK: [[IN_MINUS_1:%[^ ]+]] = sext i16 [[LD_IN_MINUS_1]] to i32
148 ; CHECK: [[ADDR_B_PLUS_1:%[^ ]+]] = getelementptr inbounds i16, i16* %b, i32 1
149 ; CHECK: [[LD_B_PLUS_1:%[^ ]+]] = load i16, i16* [[ADDR_B_PLUS_1]], align 2
150 ; CHECK: [[B_PLUS_1:%[^ ]+]] = sext i16 [[LD_B_PLUS_1]] to i32
151 ; CHECK: [[MUL0:%[^ ]+]] = mul nsw i32 [[B_PLUS_1]], [[IN_MINUS_1]]
152 ; CHECK: [[SEXT1:%[^ ]+]] = sext i32 [[MUL0]] to i64
153 ; CHECK: [[ADD0:%[^ ]+]] = add i64 %sext.0, [[SEXT1]]
154 ; CHECK: [[ADDR_IN_MINUS_3:%[^ ]+]] = getelementptr inbounds i16, i16* %in, i32 -3
155 ; CHECK: [[CAST_ADDR_IN_MINUS_3:%[^ ]+]] = bitcast i16* [[ADDR_IN_MINUS_3]] to i32*
156 ; CHECK: [[IN_MINUS_3:%[^ ]+]] = load i32, i32* [[CAST_ADDR_IN_MINUS_3]], align 2
157 ; CHECK: [[ADDR_B_PLUS_2:%[^ ]+]] = getelementptr inbounds i16, i16* %b, i32 2
158 ; CHECK: [[CAST_ADDR_B_PLUS_2:%[^ ]+]] = bitcast i16* [[ADDR_B_PLUS_2]] to i32*
159 ; CHECK: [[B_PLUS_2:%[^ ]+]] = load i32, i32* [[CAST_ADDR_B_PLUS_2]], align 2
160 ; CHECK: [[ADDR_IN_MINUS_5:%[^ ]+]] = getelementptr inbounds i16, i16* %in, i32 -5
161 ; CHECK: [[CAST_ADDR_IN_MINUS_5:%[^ ]+]] = bitcast i16* [[ADDR_IN_MINUS_5]] to i32*
162 ; CHECK: [[IN_MINUS_5:%[^ ]+]] = load i32, i32* [[CAST_ADDR_IN_MINUS_5]], align 2
163 ; CHECK: [[ADDR_B_PLUS_4:%[^ ]+]] = getelementptr inbounds i16, i16* %b, i32 4
164 ; CHECK: [[CAST_ADDR_B_PLUS_4:%[^ ]+]] = bitcast i16* [[ADDR_B_PLUS_4]] to i32*
165 ; CHECK: [[B_PLUS_4:%[^ ]+]] = load i32, i32* [[CAST_ADDR_B_PLUS_4]], align 2
166 ; CHECK: [[ACC:%[^ ]+]] = call i64 @llvm.arm.smlaldx(i32 [[IN_MINUS_5]], i32 [[B_PLUS_4]], i64 [[ADD0]])
167 ; CHECK: [[RES:%[^ ]+]] = call i64 @llvm.arm.smlaldx(i32 [[IN_MINUS_3]], i32 [[B_PLUS_2]], i64 [[ACC]])
168 ; CHECK: ret i64 [[RES]]
; Same reduction shape as first_mul_invalid, but every 32-bit mul is
; sign-extended to i64 before being accumulated. The pass must select the
; 64-bit smlaldx intrinsic instead of smladx (checked above).
define i64 @with_64bit_acc(i16* nocapture readonly %in, i16* nocapture readonly %b) {
entry:
  %0 = load i16, i16* %in, align 2
  %conv = sext i16 %0 to i32
  %1 = load i16, i16* %b, align 2
  %conv2 = sext i16 %1 to i32
  %call = tail call i32 @bar(i32 %conv, i32 %conv2)
  %sext.0 = sext i32 %call to i64
  %arrayidx3 = getelementptr inbounds i16, i16* %in, i32 -1
  %2 = load i16, i16* %arrayidx3, align 2
  %conv4 = sext i16 %2 to i32
  %arrayidx5 = getelementptr inbounds i16, i16* %b, i32 1
  %3 = load i16, i16* %arrayidx5, align 2
  %conv6 = sext i16 %3 to i32
  %mul = mul nsw i32 %conv6, %conv4
  %sext.1 = sext i32 %mul to i64
  %add = add i64 %sext.0, %sext.1
  %arrayidx7 = getelementptr inbounds i16, i16* %in, i32 -2
  %4 = load i16, i16* %arrayidx7, align 2
  %conv8 = sext i16 %4 to i32
  %arrayidx9 = getelementptr inbounds i16, i16* %b, i32 2
  %5 = load i16, i16* %arrayidx9, align 2
  %conv10 = sext i16 %5 to i32
  %mul11 = mul nsw i32 %conv10, %conv8
  %sext.2 = sext i32 %mul11 to i64
  %add12 = add i64 %add, %sext.2
  %arrayidx13 = getelementptr inbounds i16, i16* %in, i32 -3
  %6 = load i16, i16* %arrayidx13, align 2
  %conv14 = sext i16 %6 to i32
  %arrayidx15 = getelementptr inbounds i16, i16* %b, i32 3
  %7 = load i16, i16* %arrayidx15, align 2
  %conv16 = sext i16 %7 to i32
  %mul17 = mul nsw i32 %conv16, %conv14
  %sext.3 = sext i32 %mul17 to i64
  %add18 = add i64 %add12, %sext.3
  %arrayidx19 = getelementptr inbounds i16, i16* %in, i32 -4
  %8 = load i16, i16* %arrayidx19, align 2
  %conv20 = sext i16 %8 to i32
  %arrayidx21 = getelementptr inbounds i16, i16* %b, i32 4
  %9 = load i16, i16* %arrayidx21, align 2
  %conv22 = sext i16 %9 to i32
  %mul23 = mul nsw i32 %conv22, %conv20
  %sext.4 = sext i32 %mul23 to i64
  %add24 = add i64 %add18, %sext.4
  %arrayidx25 = getelementptr inbounds i16, i16* %in, i32 -5
  %10 = load i16, i16* %arrayidx25, align 2
  %conv26 = sext i16 %10 to i32
  %arrayidx27 = getelementptr inbounds i16, i16* %b, i32 5
  %11 = load i16, i16* %arrayidx27, align 2
  %conv28 = sext i16 %11 to i32
  %mul29 = mul nsw i32 %conv28, %conv26
  %sext.5 = sext i32 %mul29 to i64
  %add30 = add i64 %add24, %sext.5
  ; Restored terminator: return the final i64 accumulator (CHECK expects
  ; `ret i64 [[RES]]` after the transform).
  ret i64 %add30
}
; CHECK-LABEL: with_64bit_add_acc
226 ; CHECK: [[ADDR_X_PLUS_1:%[^ ]+]] = getelementptr inbounds i16, i16* %px.10756.unr, i32 1
227 ; CHECK: [[X:%[^ ]+]] = load i16, i16* %px.10756.unr, align 2
228 ; CHECK: [[SEXT_X:%[^ ]+]] = sext i16 [[X]] to i32
229 ; CHECK: [[ADDR_Y_MINUS_1:%[^ ]+]] = getelementptr inbounds i16, i16* %py.8757.unr, i32 -1
230 ; CHECK: [[Y:%[^ ]+]] = load i16, i16* %py.8757.unr, align 2
231 ; CHECK: [[SEXT_Y:%[^ ]+]] = sext i16 [[Y]] to i32
232 ; CHECK: [[MUL0:%[^ ]+]] = mul nsw i32 [[SEXT_Y]], [[SEXT_X]]
233 ; CHECK: [[SEXT_MUL0:%[^ ]+]] = sext i32 [[MUL0]] to i64
234 ; CHECK: [[ADD_1:%[^ ]+]] = add nsw i64 %sum.3758.unr, [[SEXT_MUL0]]
235 ; CHECK: [[X_PLUS_2:%[^ ]+]] = getelementptr inbounds i16, i16* %px.10756.unr, i32 2
236 ; CHECK: [[X_1:%[^ ]+]] = load i16, i16* [[ADDR_X_PLUS_1]], align 2
237 ; CHECK: [[SEXT_X_1:%[^ ]+]] = sext i16 [[X_1]] to i32
238 ; CHECK: [[Y_1:%[^ ]+]] = load i16, i16* [[ADDR_Y_MINUS_1]], align 2
239 ; CHECK: [[SEXT_Y_1:%[^ ]+]] = sext i16 [[Y_1]] to i32
240 ; CHECK: [[UNPAIRED:%[^ ]+]] = mul nsw i32 [[SEXT_Y_1]], [[SEXT_X_1]]
241 ; CHECK: [[ADDR_X_PLUS_2:%[^ ]+]] = bitcast i16* [[X_PLUS_2]] to i32*
242 ; CHECK: [[X_2:%[^ ]+]] = load i32, i32* [[ADDR_X_PLUS_2]], align 2
243 ; CHECK: [[Y_MINUS_3:%[^ ]+]] = getelementptr inbounds i16, i16* %py.8757.unr, i32 -3
244 ; CHECK: [[ADDR_Y_MINUS_3:%[^ ]+]] = bitcast i16* [[Y_MINUS_3]] to i32*
245 ; CHECK: [[Y_3:%[^ ]+]] = load i32, i32* [[ADDR_Y_MINUS_3]], align 2
246 ; CHECK: [[SEXT:%[^ ]+]] = sext i32 [[UNPAIRED]] to i64
247 ; CHECK: [[ACC:%[^ ]+]] = add i64 [[SEXT]], [[ADD_1]]
248 ; CHECK: [[RES:%[^ ]+]] = call i64 @llvm.arm.smlaldx(i32 [[Y_3]], i32 [[X_2]], i64 [[ACC]])
249 ; CHECK: ret i64 [[RES]]
; 64-bit accumulation where the incoming i32 %acc is sign-extended to form the
; initial i64 accumulator. Only the last mul pair is combined into smlaldx;
; the earlier odd-offset loads stay scalar (checked above).
define i64 @with_64bit_add_acc(i16* nocapture readonly %px.10756.unr, i16* nocapture readonly %py.8757.unr, i32 %acc) {
entry:
  %sum.3758.unr = sext i32 %acc to i64
  %incdec.ptr184.epil = getelementptr inbounds i16, i16* %px.10756.unr, i32 1
  %tmp216 = load i16, i16* %px.10756.unr, align 2
  %conv185.epil = sext i16 %tmp216 to i32
  %incdec.ptr186.epil = getelementptr inbounds i16, i16* %py.8757.unr, i32 -1
  %tmp217 = load i16, i16* %py.8757.unr, align 2
  %conv187.epil = sext i16 %tmp217 to i32
  %mul.epil = mul nsw i32 %conv187.epil, %conv185.epil
  %conv188.epil = sext i32 %mul.epil to i64
  %add189.epil = add nsw i64 %sum.3758.unr, %conv188.epil
  %incdec.ptr190.epil = getelementptr inbounds i16, i16* %px.10756.unr, i32 2
  %tmp218 = load i16, i16* %incdec.ptr184.epil, align 2
  %conv191.epil = sext i16 %tmp218 to i32
  %incdec.ptr192.epil = getelementptr inbounds i16, i16* %py.8757.unr, i32 -2
  %tmp219 = load i16, i16* %incdec.ptr186.epil, align 2
  %conv193.epil = sext i16 %tmp219 to i32
  %mul194.epil = mul nsw i32 %conv193.epil, %conv191.epil
  %conv195.epil = sext i32 %mul194.epil to i64
  %add196.epil = add nsw i64 %add189.epil, %conv195.epil
  %incdec.ptr197.epil = getelementptr inbounds i16, i16* %px.10756.unr, i32 3
  %tmp220 = load i16, i16* %incdec.ptr190.epil, align 2
  %conv198.epil = sext i16 %tmp220 to i32
  %incdec.ptr199.epil = getelementptr inbounds i16, i16* %py.8757.unr, i32 -3
  %tmp221 = load i16, i16* %incdec.ptr192.epil, align 2
  %conv200.epil = sext i16 %tmp221 to i32
  %mul201.epil = mul nsw i32 %conv200.epil, %conv198.epil
  %conv202.epil = sext i32 %mul201.epil to i64
  %add203.epil = add nsw i64 %add196.epil, %conv202.epil
  %tmp222 = load i16, i16* %incdec.ptr197.epil, align 2
  %conv205.epil = sext i16 %tmp222 to i32
  %tmp223 = load i16, i16* %incdec.ptr199.epil, align 2
  %conv207.epil = sext i16 %tmp223 to i32
  %mul208.epil = mul nsw i32 %conv207.epil, %conv205.epil
  %conv209.epil = sext i32 %mul208.epil to i64
  %add210.epil = add nsw i64 %add203.epil, %conv209.epil
  ; Restored terminator: return the final i64 accumulator (CHECK expects
  ; `ret i64 [[RES]]` after the transform).
  ret i64 %add210.epil
}
293 declare dso_local i32 @bar(i32, i32) local_unnamed_addr