// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -ffixed-point -triple x86_64-unknown-linux-gnu -S -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,SIGNED
// RUN: %clang_cc1 -ffixed-point -triple x86_64-unknown-linux-gnu -fpadding-on-unsigned-fixed-point -S -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,UNSIGNED
short _Accum sa;
_Accum a, a2, a3, a4;
unsigned short _Accum usa;
unsigned _Accum ua;
unsigned long _Accum ula;
short _Fract sf;
_Fract f;
long _Fract lf;
unsigned short _Fract usf;
unsigned _Fract uf;
unsigned long _Fract ulf;

_Sat short _Accum sa_sat;
_Sat long _Accum la_sat;
_Sat unsigned short _Accum usa_sat;
_Sat unsigned _Accum ua_sat;
_Sat unsigned long _Accum ula_sat;
_Sat unsigned _Fract uf_sat;

int i;
unsigned int ui;
_Bool b;

// CHECK-LABEL: @smul_sasasa(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @sa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @sa, align 2
// CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
// CHECK-NEXT: store i16 [[TMP2]], ptr @sa, align 2
// CHECK-NEXT: ret void
void smul_sasasa(void) {
  sa = sa * sa;
}

// CHECK-LABEL: @smul_asaa(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @sa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i32
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 15)
// CHECK-NEXT: store i32 [[TMP2]], ptr @a, align 4
// CHECK-NEXT: ret void
void smul_asaa(void) {
  a = sa * a;
}

// CHECK-LABEL: @smul_sasasf(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @sa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @sf, align 1
// CHECK-NEXT: [[RESIZE:%.*]] = sext i8 [[TMP1]] to i16
// CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 7)
// CHECK-NEXT: store i16 [[TMP2]], ptr @sa, align 2
// CHECK-NEXT: ret void
void smul_sasasf(void) {
  sa = sa * sf;
}

// CHECK-LABEL: @smul_sasaf(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @sa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @f, align 2
// CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i24
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i24 [[RESIZE]], 8
// CHECK-NEXT: [[RESIZE1:%.*]] = sext i16 [[TMP1]] to i24
// CHECK-NEXT: [[TMP2:%.*]] = call i24 @llvm.smul.fix.i24(i24 [[UPSCALE]], i24 [[RESIZE1]], i32 15)
// CHECK-NEXT: [[DOWNSCALE:%.*]] = ashr i24 [[TMP2]], 8
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i24 [[DOWNSCALE]] to i16
// CHECK-NEXT: store i16 [[RESIZE2]], ptr @sa, align 2
// CHECK-NEXT: ret void
void smul_sasaf(void) {
  sa = sa * f;
}

// CHECK-LABEL: @smul_aasf(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @sf, align 1
// CHECK-NEXT: [[RESIZE:%.*]] = sext i8 [[TMP1]] to i32
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[TMP0]], i32 [[UPSCALE]], i32 15)
// CHECK-NEXT: store i32 [[TMP2]], ptr @a, align 4
// CHECK-NEXT: ret void
void smul_aasf(void) {
  a = a * sf;
}

// CHECK-LABEL: @smul_aalf(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @lf, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = sext i32 [[TMP0]] to i48
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i48 [[RESIZE]], 16
// CHECK-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i48
// CHECK-NEXT: [[TMP2:%.*]] = call i48 @llvm.smul.fix.i48(i48 [[UPSCALE]], i48 [[RESIZE1]], i32 31)
// CHECK-NEXT: [[DOWNSCALE:%.*]] = ashr i48 [[TMP2]], 16
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i48 [[DOWNSCALE]] to i32
// CHECK-NEXT: store i32 [[RESIZE2]], ptr @a, align 4
// CHECK-NEXT: ret void
void smul_aalf(void) {
  a = a * lf;
}

// SIGNED-LABEL: @smul_sasausa(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @sa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i16, ptr @usa, align 2
// SIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i17
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i17 [[RESIZE]], 1
// SIGNED-NEXT: [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i17
// SIGNED-NEXT: [[TMP2:%.*]] = call i17 @llvm.smul.fix.i17(i17 [[UPSCALE]], i17 [[RESIZE1]], i32 8)
// SIGNED-NEXT: [[DOWNSCALE:%.*]] = ashr i17 [[TMP2]], 1
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i17 [[DOWNSCALE]] to i16
// SIGNED-NEXT: store i16 [[RESIZE2]], ptr @sa, align 2
// SIGNED-NEXT: ret void
// UNSIGNED-LABEL: @smul_sasausa(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @sa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, ptr @usa, align 2
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
// UNSIGNED-NEXT: store i16 [[TMP2]], ptr @sa, align 2
// UNSIGNED-NEXT: ret void
void smul_sasausa(void) {
  sa = sa * usa;
}

// SIGNED-LABEL: @smul_asaua(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @sa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i32, ptr @ua, align 4
// SIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i33
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i33 [[RESIZE]], 9
// SIGNED-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i33
// SIGNED-NEXT: [[TMP2:%.*]] = call i33 @llvm.smul.fix.i33(i33 [[UPSCALE]], i33 [[RESIZE1]], i32 16)
// SIGNED-NEXT: [[DOWNSCALE:%.*]] = ashr i33 [[TMP2]], 1
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i33 [[DOWNSCALE]] to i32
// SIGNED-NEXT: store i32 [[RESIZE2]], ptr @a, align 4
// SIGNED-NEXT: ret void
// UNSIGNED-LABEL: @smul_asaua(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @sa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, ptr @ua, align 4
// UNSIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i32
// UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 15)
// UNSIGNED-NEXT: store i32 [[TMP2]], ptr @a, align 4
// UNSIGNED-NEXT: ret void
void smul_asaua(void) {
  a = sa * ua;
}

// SIGNED-LABEL: @smul_sasausf(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @sa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i8, ptr @usf, align 1
// SIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i17
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i17 [[RESIZE]], 1
// SIGNED-NEXT: [[RESIZE1:%.*]] = zext i8 [[TMP1]] to i17
// SIGNED-NEXT: [[TMP2:%.*]] = call i17 @llvm.smul.fix.i17(i17 [[UPSCALE]], i17 [[RESIZE1]], i32 8)
// SIGNED-NEXT: [[DOWNSCALE:%.*]] = ashr i17 [[TMP2]], 1
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i17 [[DOWNSCALE]] to i16
// SIGNED-NEXT: store i16 [[RESIZE2]], ptr @sa, align 2
// SIGNED-NEXT: ret void
// UNSIGNED-LABEL: @smul_sasausf(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @sa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i8, ptr @usf, align 1
// UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i8 [[TMP1]] to i16
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 7)
// UNSIGNED-NEXT: store i16 [[TMP2]], ptr @sa, align 2
// UNSIGNED-NEXT: ret void
void smul_sasausf(void) {
  sa = sa * usf;
}

// SIGNED-LABEL: @smul_sasaulf(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @sa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i32, ptr @ulf, align 4
// SIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i41
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i41 [[RESIZE]], 25
// SIGNED-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i41
// SIGNED-NEXT: [[TMP2:%.*]] = call i41 @llvm.smul.fix.i41(i41 [[UPSCALE]], i41 [[RESIZE1]], i32 32)
// SIGNED-NEXT: [[DOWNSCALE:%.*]] = ashr i41 [[TMP2]], 25
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i41 [[DOWNSCALE]] to i16
// SIGNED-NEXT: store i16 [[RESIZE2]], ptr @sa, align 2
// SIGNED-NEXT: ret void
// UNSIGNED-LABEL: @smul_sasaulf(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @sa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, ptr @ulf, align 4
// UNSIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i40
// UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE]], 24
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i40 @llvm.smul.fix.i40(i40 [[UPSCALE]], i40 [[RESIZE1]], i32 31)
// UNSIGNED-NEXT: [[DOWNSCALE:%.*]] = ashr i40 [[TMP2]], 24
// UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i40 [[DOWNSCALE]] to i16
// UNSIGNED-NEXT: store i16 [[RESIZE2]], ptr @sa, align 2
// UNSIGNED-NEXT: ret void
void smul_sasaulf(void) {
  sa = sa * ulf;
}

// CHECK-LABEL: @smul_aaaaa(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @a2, align 4
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[TMP0]], i32 [[TMP1]], i32 15)
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @a3, align 4
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[TMP2]], i32 [[TMP3]], i32 15)
// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr @a4, align 4
// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[TMP4]], i32 [[TMP5]], i32 15)
// CHECK-NEXT: store i32 [[TMP6]], ptr @a, align 4
// CHECK-NEXT: ret void
void smul_aaaaa(void) {
  a = a * a2 * a3 * a4;
}

// SIGNED-LABEL: @umul_usausausa(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i16, ptr @usa, align 2
// SIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.umul.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 8)
// SIGNED-NEXT: store i16 [[TMP2]], ptr @usa, align 2
// SIGNED-NEXT: ret void
// UNSIGNED-LABEL: @umul_usausausa(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, ptr @usa, align 2
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
// UNSIGNED-NEXT: store i16 [[TMP2]], ptr @usa, align 2
// UNSIGNED-NEXT: ret void
void umul_usausausa(void) {
  usa = usa * usa;
}

// SIGNED-LABEL: @umul_uausaua(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i32, ptr @ua, align 4
// SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i32
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// SIGNED-NEXT: [[TMP2:%.*]] = call i32 @llvm.umul.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 16)
// SIGNED-NEXT: store i32 [[TMP2]], ptr @ua, align 4
// SIGNED-NEXT: ret void
// UNSIGNED-LABEL: @umul_uausaua(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, ptr @ua, align 4
// UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i32
// UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 15)
// UNSIGNED-NEXT: store i32 [[TMP2]], ptr @ua, align 4
// UNSIGNED-NEXT: ret void
void umul_uausaua(void) {
  ua = usa * ua;
}

// SIGNED-LABEL: @umul_usausausf(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i8, ptr @usf, align 1
// SIGNED-NEXT: [[RESIZE:%.*]] = zext i8 [[TMP1]] to i16
// SIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.umul.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 8)
// SIGNED-NEXT: store i16 [[TMP2]], ptr @usa, align 2
// SIGNED-NEXT: ret void
// UNSIGNED-LABEL: @umul_usausausf(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i8, ptr @usf, align 1
// UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i8 [[TMP1]] to i16
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 7)
// UNSIGNED-NEXT: store i16 [[TMP2]], ptr @usa, align 2
// UNSIGNED-NEXT: ret void
void umul_usausausf(void) {
  usa = usa * usf;
}

// SIGNED-LABEL: @umul_usausauf(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i16, ptr @uf, align 2
// SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i24
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i24 [[RESIZE]], 8
// SIGNED-NEXT: [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i24
// SIGNED-NEXT: [[TMP2:%.*]] = call i24 @llvm.umul.fix.i24(i24 [[UPSCALE]], i24 [[RESIZE1]], i32 16)
// SIGNED-NEXT: [[DOWNSCALE:%.*]] = lshr i24 [[TMP2]], 8
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i24 [[DOWNSCALE]] to i16
// SIGNED-NEXT: store i16 [[RESIZE2]], ptr @usa, align 2
// SIGNED-NEXT: ret void
// UNSIGNED-LABEL: @umul_usausauf(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, ptr @uf, align 2
// UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i24
// UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i24 [[RESIZE]], 8
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i24
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i24 @llvm.smul.fix.i24(i24 [[UPSCALE]], i24 [[RESIZE1]], i32 15)
// UNSIGNED-NEXT: [[DOWNSCALE:%.*]] = lshr i24 [[TMP2]], 8
// UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i24 [[DOWNSCALE]] to i16
// UNSIGNED-NEXT: store i16 [[RESIZE2]], ptr @usa, align 2
// UNSIGNED-NEXT: ret void
void umul_usausauf(void) {
  usa = usa * uf;
}

// CHECK-LABEL: @int_sasai(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @sa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @i, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i39
// CHECK-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
// CHECK-NEXT: [[TMP2:%.*]] = call i39 @llvm.smul.fix.i39(i39 [[RESIZE]], i39 [[UPSCALE]], i32 7)
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i39 [[TMP2]] to i16
// CHECK-NEXT: store i16 [[RESIZE2]], ptr @sa, align 2
// CHECK-NEXT: ret void
void int_sasai(void) {
  sa = sa * i;
}

// CHECK-LABEL: @int_sasaui(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @sa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @ui, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i40
// CHECK-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 7
// CHECK-NEXT: [[TMP2:%.*]] = call i40 @llvm.smul.fix.i40(i40 [[RESIZE]], i40 [[UPSCALE]], i32 7)
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
// CHECK-NEXT: store i16 [[RESIZE2]], ptr @sa, align 2
// CHECK-NEXT: ret void
void int_sasaui(void) {
  sa = sa * ui;
}

// SIGNED-LABEL: @int_usausai(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i32, ptr @i, align 4
// SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i40
// SIGNED-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i40
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 8
// SIGNED-NEXT: [[TMP2:%.*]] = call i40 @llvm.smul.fix.i40(i40 [[RESIZE]], i40 [[UPSCALE]], i32 8)
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
// SIGNED-NEXT: store i16 [[RESIZE2]], ptr @usa, align 2
// SIGNED-NEXT: ret void
// UNSIGNED-LABEL: @int_usausai(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, ptr @i, align 4
// UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i39
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
// UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i39 @llvm.smul.fix.i39(i39 [[RESIZE]], i39 [[UPSCALE]], i32 7)
// UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i39 [[TMP2]] to i16
// UNSIGNED-NEXT: store i16 [[RESIZE2]], ptr @usa, align 2
// UNSIGNED-NEXT: ret void
void int_usausai(void) {
  usa = usa * i;
}

// SIGNED-LABEL: @int_usausaui(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i32, ptr @ui, align 4
// SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i40
// SIGNED-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 8
// SIGNED-NEXT: [[TMP2:%.*]] = call i40 @llvm.umul.fix.i40(i40 [[RESIZE]], i40 [[UPSCALE]], i32 8)
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
// SIGNED-NEXT: store i16 [[RESIZE2]], ptr @usa, align 2
// SIGNED-NEXT: ret void
// UNSIGNED-LABEL: @int_usausaui(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, ptr @ui, align 4
// UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i39
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i39
// UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i39 @llvm.umul.fix.i39(i39 [[RESIZE]], i39 [[UPSCALE]], i32 7)
// UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i39 [[TMP2]] to i16
// UNSIGNED-NEXT: store i16 [[RESIZE2]], ptr @usa, align 2
// UNSIGNED-NEXT: ret void
void int_usausaui(void) {
  usa = usa * ui;
}

// CHECK-LABEL: @int_lflfui(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @lf, align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @ui, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = sext i32 [[TMP0]] to i64
// CHECK-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i64 [[RESIZE1]], 31
// CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.smul.fix.i64(i64 [[RESIZE]], i64 [[UPSCALE]], i32 31)
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK-NEXT: store i32 [[RESIZE2]], ptr @lf, align 4
// CHECK-NEXT: ret void
void int_lflfui(void) {
  lf = lf * ui;
}

// CHECK-LABEL: @int_aab(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @b, align 1
// CHECK-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP1]] to i1
// CHECK-NEXT: [[CONV:%.*]] = zext i1 [[TOBOOL]] to i32
// CHECK-NEXT: [[RESIZE:%.*]] = sext i32 [[TMP0]] to i47
// CHECK-NEXT: [[RESIZE1:%.*]] = sext i32 [[CONV]] to i47
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i47 [[RESIZE1]], 15
// CHECK-NEXT: [[TMP2:%.*]] = call i47 @llvm.smul.fix.i47(i47 [[RESIZE]], i47 [[UPSCALE]], i32 15)
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i47 [[TMP2]] to i32
// CHECK-NEXT: store i32 [[RESIZE2]], ptr @a, align 4
// CHECK-NEXT: ret void
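void int_aab(void) {
  a = a * b;
}
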
// CHECK-LABEL: @int_aia(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @i, align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = sext i32 [[TMP0]] to i47
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i47 [[RESIZE]], 15
// CHECK-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i47
// CHECK-NEXT: [[TMP2:%.*]] = call i47 @llvm.smul.fix.i47(i47 [[UPSCALE]], i47 [[RESIZE1]], i32 15)
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i47 [[TMP2]] to i32
// CHECK-NEXT: store i32 [[RESIZE2]], ptr @a, align 4
// CHECK-NEXT: ret void
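void int_aia(void) {
  a = i * a;
}
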
// SIGNED-LABEL: @int_usauiusa(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i32, ptr @ui, align 4
// SIGNED-NEXT: [[TMP1:%.*]] = load i16, ptr @usa, align 2
// SIGNED-NEXT: [[RESIZE:%.*]] = zext i32 [[TMP0]] to i40
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE]], 8
// SIGNED-NEXT: [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i40
// SIGNED-NEXT: [[TMP2:%.*]] = call i40 @llvm.umul.fix.i40(i40 [[UPSCALE]], i40 [[RESIZE1]], i32 8)
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
// SIGNED-NEXT: store i16 [[RESIZE2]], ptr @usa, align 2
// SIGNED-NEXT: ret void
// UNSIGNED-LABEL: @int_usauiusa(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i32, ptr @ui, align 4
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, ptr @usa, align 2
// UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i32 [[TMP0]] to i39
// UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE]], 7
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i39
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i39 @llvm.umul.fix.i39(i39 [[UPSCALE]], i39 [[RESIZE1]], i32 7)
// UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i39 [[TMP2]] to i16
// UNSIGNED-NEXT: store i16 [[RESIZE2]], ptr @usa, align 2
// UNSIGNED-NEXT: ret void
void int_usauiusa(void) {
  usa = ui * usa;
}

// CHECK-LABEL: @int_sauisa(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @ui, align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @sa, align 2
// CHECK-NEXT: [[RESIZE:%.*]] = zext i32 [[TMP0]] to i40
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE]], 7
// CHECK-NEXT: [[RESIZE1:%.*]] = sext i16 [[TMP1]] to i40
// CHECK-NEXT: [[TMP2:%.*]] = call i40 @llvm.smul.fix.i40(i40 [[UPSCALE]], i40 [[RESIZE1]], i32 7)
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
// CHECK-NEXT: store i16 [[RESIZE2]], ptr @sa, align 2
// CHECK-NEXT: ret void
void int_sauisa(void) {
  sa = ui * sa;
}

// CHECK-LABEL: @sat_sassasas(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @sa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @sa_sat, align 2
// CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
// CHECK-NEXT: store i16 [[TMP2]], ptr @sa_sat, align 2
// CHECK-NEXT: ret void
void sat_sassasas(void) {
  sa_sat = sa * sa_sat;
}

// SIGNED-LABEL: @sat_usasusausas(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i16, ptr @usa_sat, align 2
// SIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.umul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 8)
// SIGNED-NEXT: store i16 [[TMP2]], ptr @usa_sat, align 2
// SIGNED-NEXT: ret void
// UNSIGNED-LABEL: @sat_usasusausas(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, ptr @usa_sat, align 2
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
// UNSIGNED-NEXT: [[RESIZE:%.*]] = trunc i16 [[TMP2]] to i15
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
// UNSIGNED-NEXT: store i16 [[RESIZE1]], ptr @usa_sat, align 2
// UNSIGNED-NEXT: ret void
void sat_usasusausas(void) {
  usa_sat = usa * usa_sat;
}

// SIGNED-LABEL: @sat_uasuausas(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i32, ptr @ua, align 4
// SIGNED-NEXT: [[TMP1:%.*]] = load i16, ptr @usa_sat, align 2
// SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP1]] to i32
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// SIGNED-NEXT: [[TMP2:%.*]] = call i32 @llvm.umul.fix.sat.i32(i32 [[TMP0]], i32 [[UPSCALE]], i32 16)
// SIGNED-NEXT: store i32 [[TMP2]], ptr @ua_sat, align 4
// SIGNED-NEXT: ret void
// UNSIGNED-LABEL: @sat_uasuausas(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i32, ptr @ua, align 4
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, ptr @usa_sat, align 2
// UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP1]] to i32
// UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i32 @llvm.smul.fix.sat.i32(i32 [[TMP0]], i32 [[UPSCALE]], i32 15)
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = trunc i32 [[TMP2]] to i31
// UNSIGNED-NEXT: [[RESIZE2:%.*]] = zext i31 [[RESIZE1]] to i32
// UNSIGNED-NEXT: store i32 [[RESIZE2]], ptr @ua_sat, align 4
// UNSIGNED-NEXT: ret void
void sat_uasuausas(void) {
  ua_sat = ua * usa_sat;
}

// CHECK-LABEL: @sat_sassasi(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @sa_sat, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @i, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i39
// CHECK-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
// CHECK-NEXT: [[TMP2:%.*]] = call i39 @llvm.smul.fix.sat.i39(i39 [[RESIZE]], i39 [[UPSCALE]], i32 7)
// CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i39 [[TMP2]], 32767
// CHECK-NEXT: [[SATMAX:%.*]] = select i1 [[TMP3]], i39 32767, i39 [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = icmp slt i39 [[SATMAX]], -32768
// CHECK-NEXT: [[SATMIN:%.*]] = select i1 [[TMP4]], i39 -32768, i39 [[SATMAX]]
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i39 [[SATMIN]] to i16
// CHECK-NEXT: store i16 [[RESIZE2]], ptr @sa_sat, align 2
// CHECK-NEXT: ret void
void sat_sassasi(void) {
  sa_sat = sa_sat * i;
}

// CHECK-LABEL: @sat_sassasui(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @sa_sat, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @ui, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i40
// CHECK-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 7
// CHECK-NEXT: [[TMP2:%.*]] = call i40 @llvm.smul.fix.sat.i40(i40 [[RESIZE]], i40 [[UPSCALE]], i32 7)
// CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i40 [[TMP2]], 32767
// CHECK-NEXT: [[SATMAX:%.*]] = select i1 [[TMP3]], i40 32767, i40 [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = icmp slt i40 [[SATMAX]], -32768
// CHECK-NEXT: [[SATMIN:%.*]] = select i1 [[TMP4]], i40 -32768, i40 [[SATMAX]]
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i40 [[SATMIN]] to i16
// CHECK-NEXT: store i16 [[RESIZE2]], ptr @sa_sat, align 2
// CHECK-NEXT: ret void
void sat_sassasui(void) {
  sa_sat = sa_sat * ui;
}

// SIGNED-LABEL: @sat_ufsufsufs(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @uf_sat, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i16, ptr @uf_sat, align 2
// SIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.umul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 16)
// SIGNED-NEXT: store i16 [[TMP2]], ptr @uf_sat, align 2
// SIGNED-NEXT: ret void
// UNSIGNED-LABEL: @sat_ufsufsufs(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @uf_sat, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, ptr @uf_sat, align 2
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 15)
// UNSIGNED-NEXT: [[RESIZE:%.*]] = trunc i16 [[TMP2]] to i15
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
// UNSIGNED-NEXT: store i16 [[RESIZE1]], ptr @uf_sat, align 2
// UNSIGNED-NEXT: ret void
void sat_ufsufsufs(void) {
  uf_sat = uf_sat * uf_sat;
}

// SIGNED-LABEL: @sat_usasusasi(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa_sat, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i32, ptr @i, align 4
// SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i40
// SIGNED-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i40
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 8
// SIGNED-NEXT: [[TMP2:%.*]] = call i40 @llvm.smul.fix.sat.i40(i40 [[RESIZE]], i40 [[UPSCALE]], i32 8)
// SIGNED-NEXT: [[TMP3:%.*]] = icmp sgt i40 [[TMP2]], 65535
// SIGNED-NEXT: [[SATMAX:%.*]] = select i1 [[TMP3]], i40 65535, i40 [[TMP2]]
// SIGNED-NEXT: [[TMP4:%.*]] = icmp slt i40 [[SATMAX]], 0
// SIGNED-NEXT: [[SATMIN:%.*]] = select i1 [[TMP4]], i40 0, i40 [[SATMAX]]
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i40 [[SATMIN]] to i16
// SIGNED-NEXT: store i16 [[RESIZE2]], ptr @usa_sat, align 2
// SIGNED-NEXT: ret void
// UNSIGNED-LABEL: @sat_usasusasi(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, ptr @usa_sat, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, ptr @i, align 4
// UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i39
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
// UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i39 @llvm.smul.fix.sat.i39(i39 [[RESIZE]], i39 [[UPSCALE]], i32 7)
// UNSIGNED-NEXT: [[TMP3:%.*]] = icmp sgt i39 [[TMP2]], 32767
// UNSIGNED-NEXT: [[SATMAX:%.*]] = select i1 [[TMP3]], i39 32767, i39 [[TMP2]]
// UNSIGNED-NEXT: [[TMP4:%.*]] = icmp slt i39 [[SATMAX]], 0
// UNSIGNED-NEXT: [[SATMIN:%.*]] = select i1 [[TMP4]], i39 0, i39 [[SATMAX]]
// UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i39 [[SATMIN]] to i16
// UNSIGNED-NEXT: store i16 [[RESIZE2]], ptr @usa_sat, align 2
// UNSIGNED-NEXT: ret void
void sat_usasusasi(void) {
  usa_sat = usa_sat * i;
}