; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; REQUIRES: x86-registered-target,sparc-registered-target
; RUN: opt < %s -passes=instcombine -S -mtriple "i386-pc-linux" | FileCheck %s --check-prefixes=CHECK,DOUBLE-4BYTE-ALIGN
; RUN: opt < %s -passes=instcombine -S -mtriple "i386-pc-win32" | FileCheck %s --check-prefixes=CHECK,DOUBLE-8BYTE-ALIGN
; RUN: opt < %s -passes=instcombine -S -mtriple "x86_64-pc-win32" | FileCheck %s --check-prefixes=CHECK,DOUBLE-8BYTE-ALIGN
; RUN: opt < %s -passes=instcombine -S -mtriple "i386-pc-mingw32" | FileCheck %s --check-prefixes=CHECK,DOUBLE-8BYTE-ALIGN
; RUN: opt < %s -passes=instcombine -S -mtriple "x86_64-pc-mingw32" | FileCheck %s --check-prefixes=CHECK,DOUBLE-8BYTE-ALIGN
; RUN: opt < %s -passes=instcombine -S -mtriple "sparc-sun-solaris" | FileCheck %s --check-prefixes=CHECK,DOUBLE-8BYTE-ALIGN
; RUN: opt < %s -passes=instcombine -S -mtriple "x86_64-pc-win32" -enable-debugify 2>&1 | FileCheck --check-prefix=DBG-VALID %s
; RUN: opt < %s -passes=instcombine -S -mtriple "x86_64-pc-win32" -enable-debugify 2>&1 --try-experimental-debuginfo-iterators | FileCheck --check-prefix=DBG-VALID %s
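; This file verifies that a double-precision libcall or intrinsic whose operand
; is an fpext from a narrower FP type, and whose result is fptrunc'd back to
; that type, is shrunk to the equivalent narrower intrinsic. The two debugify
; RUN lines only check that debug info remains valid after the transform.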
declare double @floor(double)
declare double @ceil(double)
declare double @round(double)
declare double @roundeven(double)
declare double @nearbyint(double)
declare double @trunc(double)
declare double @fabs(double)

declare double @llvm.ceil.f64(double)
declare <2 x double> @llvm.ceil.v2f64(<2 x double>)

declare double @llvm.fabs.f64(double)
declare <2 x double> @llvm.fabs.v2f64(<2 x double>)

declare double @llvm.floor.f64(double)
declare <2 x double> @llvm.floor.v2f64(<2 x double>)

declare double @llvm.nearbyint.f64(double)
declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>)

declare float @llvm.rint.f32(float)
declare <2 x float> @llvm.rint.v2f32(<2 x float>)

declare double @llvm.round.f64(double)
declare <2 x double> @llvm.round.v2f64(<2 x double>)

declare double @llvm.roundeven.f64(double)
declare <2 x double> @llvm.roundeven.v2f64(<2 x double>)

declare double @llvm.trunc.f64(double)
declare <2 x double> @llvm.trunc.v2f64(<2 x double>)
define float @test_shrink_libcall_floor(float %C) {
; CHECK-LABEL: @test_shrink_libcall_floor(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.floor.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call double @floor(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_libcall_ceil(float %C) {
; CHECK-LABEL: @test_shrink_libcall_ceil(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.ceil.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call double @ceil(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_libcall_round(float %C) {
; CHECK-LABEL: @test_shrink_libcall_round(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.round.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call double @round(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_libcall_roundeven(float %C) {
; CHECK-LABEL: @test_shrink_libcall_roundeven(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.roundeven.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call double @roundeven(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_libcall_nearbyint(float %C) {
; CHECK-LABEL: @test_shrink_libcall_nearbyint(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.nearbyint.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call double @nearbyint(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_libcall_trunc(float %C) {
; CHECK-LABEL: @test_shrink_libcall_trunc(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.trunc.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call double @trunc(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
; This is replaced with the intrinsic, which does the right thing on
; all of the tested platforms.
define float @test_shrink_libcall_fabs(float %C) {
; CHECK-LABEL: @test_shrink_libcall_fabs(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call double @fabs(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
; Make sure fast math flags are preserved
define float @test_shrink_libcall_fabs_fast(float %C) {
; CHECK-LABEL: @test_shrink_libcall_fabs_fast(
; CHECK-NEXT:    [[TMP1:%.*]] = call fast float @llvm.fabs.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call fast double @fabs(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
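; The libcall tests above go through the libm declarations; the tests below
; exercise the llvm.* intrinsics directly.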
define float @test_shrink_intrin_ceil(float %C) {
; CHECK-LABEL: @test_shrink_intrin_ceil(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.ceil.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call double @llvm.ceil.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_intrin_fabs(float %C) {
; CHECK-LABEL: @test_shrink_intrin_fabs(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_intrin_floor(float %C) {
; CHECK-LABEL: @test_shrink_intrin_floor(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.floor.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call double @llvm.floor.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_intrin_nearbyint(float %C) {
; CHECK-LABEL: @test_shrink_intrin_nearbyint(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.nearbyint.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call double @llvm.nearbyint.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define half @test_shrink_intrin_rint(half %C) {
; CHECK-LABEL: @test_shrink_intrin_rint(
; CHECK-NEXT:    [[TMP1:%.*]] = call half @llvm.rint.f16(half [[C:%.*]])
; CHECK-NEXT:    ret half [[TMP1]]
  %D = fpext half %C to float
  %E = call float @llvm.rint.f32(float %D)
  %F = fptrunc float %E to half
  ret half %F
}
define float @test_shrink_intrin_round(float %C) {
; CHECK-LABEL: @test_shrink_intrin_round(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.round.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call double @llvm.round.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_intrin_roundeven(float %C) {
; CHECK-LABEL: @test_shrink_intrin_roundeven(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.roundeven.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call double @llvm.roundeven.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_intrin_trunc(float %C) {
; CHECK-LABEL: @test_shrink_intrin_trunc(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.trunc.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call double @llvm.trunc.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
declare void @use_v2f64(<2 x double>)
declare void @use_v2f32(<2 x float>)
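; Multi-use tests: the extra calls to @use_v2f64/@use_v2f32 keep the wide
; fpext or the wide result alive, so the checks below show how much of the
; fpext/call/fptrunc chain is still shrunk.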
define <2 x float> @test_shrink_intrin_ceil_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_ceil_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    ret <2 x float> [[F]]
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.ceil.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  ret <2 x float> %F
}
define <2 x float> @test_shrink_intrin_fabs_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_fabs_multi_use(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x float> [[TMP1]] to <2 x double>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[E]])
; CHECK-NEXT:    ret <2 x float> [[TMP1]]
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.fabs.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %E)
  ret <2 x float> %F
}
define <2 x float> @test_shrink_intrin_floor_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_floor_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[E]])
; CHECK-NEXT:    ret <2 x float> [[F]]
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.floor.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  call void @use_v2f64(<2 x double> %E)
  ret <2 x float> %F
}
define <2 x float> @test_shrink_intrin_nearbyint_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_nearbyint_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    ret <2 x float> [[F]]
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  ret <2 x float> %F
}
define <2 x half> @test_shrink_intrin_rint_multi_use(<2 x half> %C) {
; CHECK-LABEL: @test_shrink_intrin_rint_multi_use(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x half> @llvm.rint.v2f16(<2 x half> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f32(<2 x float> [[E]])
; CHECK-NEXT:    ret <2 x half> [[TMP1]]
  %D = fpext <2 x half> %C to <2 x float>
  %E = call <2 x float> @llvm.rint.v2f32(<2 x float> %D)
  %F = fptrunc <2 x float> %E to <2 x half>
  call void @use_v2f32(<2 x float> %E)
  ret <2 x half> %F
}
define <2 x float> @test_shrink_intrin_round_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_round_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.round.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[E]])
; CHECK-NEXT:    ret <2 x float> [[F]]
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.round.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  call void @use_v2f64(<2 x double> %E)
  ret <2 x float> %F
}
define <2 x float> @test_shrink_intrin_roundeven_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_roundeven_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.roundeven.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[E]])
; CHECK-NEXT:    ret <2 x float> [[F]]
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  call void @use_v2f64(<2 x double> %E)
  ret <2 x float> %F
}
define <2 x float> @test_shrink_intrin_trunc_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_trunc_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    ret <2 x float> [[F]]
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.trunc.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  ret <2 x float> %F
}
; Make sure fast math flags are preserved
define float @test_shrink_intrin_fabs_fast(float %C) {
; CHECK-LABEL: @test_shrink_intrin_fabs_fast(
; CHECK-NEXT:    [[TMP1:%.*]] = call fast float @llvm.fabs.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
  %D = fpext float %C to double
  %E = call fast double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
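; The following tests start from a double argument with no fpext, so the
; rounding intrinsics are not shrunk. fabs is still narrowed: the fptrunc is
; hoisted above the call, which is exact for fabs.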
define float @test_no_shrink_intrin_floor(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_floor(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.floor.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
  %E = call double @llvm.floor.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_no_shrink_intrin_ceil(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_ceil(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.ceil.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
  %E = call double @llvm.ceil.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_no_shrink_intrin_round(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_round(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.round.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
  %E = call double @llvm.round.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_no_shrink_intrin_roundeven(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_roundeven(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.roundeven.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
  %E = call double @llvm.roundeven.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_no_shrink_intrin_nearbyint(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_nearbyint(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.nearbyint.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
  %E = call double @llvm.nearbyint.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_no_shrink_intrin_trunc(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_trunc(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.trunc.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
  %E = call double @llvm.trunc.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_intrin_fabs_double_src(double %D) {
; CHECK-LABEL: @test_shrink_intrin_fabs_double_src(
; CHECK-NEXT:    [[TMP1:%.*]] = fptrunc double [[D:%.*]] to float
; CHECK-NEXT:    [[F:%.*]] = call float @llvm.fabs.f32(float [[TMP1]])
; CHECK-NEXT:    ret float [[F]]
  %E = call double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
; Make sure fast math flags are preserved
define float @test_shrink_intrin_fabs_fast_double_src(double %D) {
; CHECK-LABEL: @test_shrink_intrin_fabs_fast_double_src(
; CHECK-NEXT:    [[TMP1:%.*]] = fptrunc double [[D:%.*]] to float
; CHECK-NEXT:    [[F:%.*]] = call fast float @llvm.fabs.f32(float [[TMP1]])
; CHECK-NEXT:    ret float [[F]]
  %E = call fast double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
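; Constant-operand tests: the call and the fptrunc both constant-fold, so only
; a float constant is returned.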
define float @test_shrink_float_convertible_constant_intrin_floor() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_floor(
; CHECK-NEXT:    ret float 2.000000e+00
  %E = call double @llvm.floor.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_float_convertible_constant_intrin_ceil() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_ceil(
; CHECK-NEXT:    ret float 3.000000e+00
  %E = call double @llvm.ceil.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_float_convertible_constant_intrin_round() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_round(
; CHECK-NEXT:    ret float 2.000000e+00
  %E = call double @llvm.round.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_float_convertible_constant_intrin_roundeven() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_roundeven(
; CHECK-NEXT:    ret float 2.000000e+00
  %E = call double @llvm.roundeven.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_float_convertible_constant_intrin_nearbyint() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_nearbyint(
; CHECK-NEXT:    ret float 2.000000e+00
  %E = call double @llvm.nearbyint.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_float_convertible_constant_intrin_trunc() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_trunc(
; CHECK-NEXT:    ret float 2.000000e+00
  %E = call double @llvm.trunc.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_shrink_float_convertible_constant_intrin_fabs() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_fabs(
; CHECK-NEXT:    ret float 0x4000CCCCC0000000
  %E = call double @llvm.fabs.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}
; Make sure fast math flags are preserved
define float @test_shrink_float_convertible_constant_intrin_fabs_fast() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_fabs_fast(
; CHECK-NEXT:    ret float 0x4000CCCCC0000000
  %E = call fast double @llvm.fabs.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}
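; Mismatched-type tests: the source is a plain double and the result is
; truncated to half. The rounding intrinsics are left in f64 (the wide operand
; cannot be narrowed without potentially changing the result), while fabs is
; still shrunk to f16.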
define half @test_no_shrink_mismatched_type_intrin_floor(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_floor(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.floor.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
  %E = call double @llvm.floor.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}
define half @test_no_shrink_mismatched_type_intrin_ceil(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_ceil(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.ceil.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
  %E = call double @llvm.ceil.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}
define half @test_no_shrink_mismatched_type_intrin_round(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_round(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.round.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
  %E = call double @llvm.round.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}
define half @test_no_shrink_mismatched_type_intrin_roundeven(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_roundeven(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.roundeven.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
  %E = call double @llvm.roundeven.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}
define half @test_no_shrink_mismatched_type_intrin_nearbyint(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_nearbyint(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.nearbyint.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
  %E = call double @llvm.nearbyint.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}
define half @test_no_shrink_mismatched_type_intrin_trunc(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_trunc(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.trunc.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
  %E = call double @llvm.trunc.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}
define half @test_shrink_mismatched_type_intrin_fabs_double_src(double %D) {
; CHECK-LABEL: @test_shrink_mismatched_type_intrin_fabs_double_src(
; CHECK-NEXT:    [[TMP1:%.*]] = fptrunc double [[D:%.*]] to half
; CHECK-NEXT:    [[F:%.*]] = call half @llvm.fabs.f16(half [[TMP1]])
; CHECK-NEXT:    ret half [[F]]
  %E = call double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}
; Make sure fast math flags are preserved
define half @test_mismatched_type_intrin_fabs_fast_double_src(double %D) {
; CHECK-LABEL: @test_mismatched_type_intrin_fabs_fast_double_src(
; CHECK-NEXT:    [[TMP1:%.*]] = fptrunc double [[D:%.*]] to half
; CHECK-NEXT:    [[F:%.*]] = call fast half @llvm.fabs.f16(half [[TMP1]])
; CHECK-NEXT:    ret half [[F]]
  %E = call fast double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}
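; Half-source tests: the operand is an fpext from half, so the operation is
; shrunk all the way down to f16 and only an fpext to the result type remains.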
define <2 x double> @test_shrink_intrin_floor_fp16_vec(<2 x half> %C) {
; CHECK-LABEL: @test_shrink_intrin_floor_fp16_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call arcp <2 x half> @llvm.floor.v2f16(<2 x half> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
; CHECK-NEXT:    ret <2 x double> [[E]]
  %D = fpext <2 x half> %C to <2 x double>
  %E = call arcp <2 x double> @llvm.floor.v2f64(<2 x double> %D)
  ret <2 x double> %E
}
define float @test_shrink_intrin_ceil_fp16_src(half %C) {
; CHECK-LABEL: @test_shrink_intrin_ceil_fp16_src(
; CHECK-NEXT:    [[TMP1:%.*]] = call half @llvm.ceil.f16(half [[C:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fpext half [[TMP1]] to float
; CHECK-NEXT:    ret float [[F]]
  %D = fpext half %C to double
  %E = call double @llvm.ceil.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define <2 x double> @test_shrink_intrin_round_fp16_vec(<2 x half> %C) {
; CHECK-LABEL: @test_shrink_intrin_round_fp16_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x half> @llvm.round.v2f16(<2 x half> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
; CHECK-NEXT:    ret <2 x double> [[E]]
  %D = fpext <2 x half> %C to <2 x double>
  %E = call <2 x double> @llvm.round.v2f64(<2 x double> %D)
  ret <2 x double> %E
}
define <2 x double> @test_shrink_intrin_roundeven_fp16_vec(<2 x half> %C) {
; CHECK-LABEL: @test_shrink_intrin_roundeven_fp16_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x half> @llvm.roundeven.v2f16(<2 x half> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
; CHECK-NEXT:    ret <2 x double> [[E]]
  %D = fpext <2 x half> %C to <2 x double>
  %E = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %D)
  ret <2 x double> %E
}
define float @test_shrink_intrin_nearbyint_fp16_src(half %C) {
; CHECK-LABEL: @test_shrink_intrin_nearbyint_fp16_src(
; CHECK-NEXT:    [[TMP1:%.*]] = call half @llvm.nearbyint.f16(half [[C:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fpext half [[TMP1]] to float
; CHECK-NEXT:    ret float [[F]]
  %D = fpext half %C to double
  %E = call double @llvm.nearbyint.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define <2 x double> @test_shrink_intrin_trunc_fp16_src(<2 x half> %C) {
; CHECK-LABEL: @test_shrink_intrin_trunc_fp16_src(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x half> @llvm.trunc.v2f16(<2 x half> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
; CHECK-NEXT:    ret <2 x double> [[E]]
  %D = fpext <2 x half> %C to <2 x double>
  %E = call <2 x double> @llvm.trunc.v2f64(<2 x double> %D)
  ret <2 x double> %E
}
define float @test_shrink_intrin_fabs_fp16_src(half %C) {
; CHECK-LABEL: @test_shrink_intrin_fabs_fp16_src(
; CHECK-NEXT:    [[TMP1:%.*]] = call half @llvm.fabs.f16(half [[C:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fpext half [[TMP1]] to float
; CHECK-NEXT:    ret float [[F]]
  %D = fpext half %C to double
  %E = call double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
; Make sure fast math flags are preserved
define float @test_shrink_intrin_fabs_fast_fp16_src(half %C) {
; CHECK-LABEL: @test_shrink_intrin_fabs_fast_fp16_src(
; CHECK-NEXT:    [[TMP1:%.*]] = call fast half @llvm.fabs.f16(half [[C:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fpext half [[TMP1]] to float
; CHECK-NEXT:    ret float [[F]]
  %D = fpext half %C to double
  %E = call fast double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
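; Here the fpext has a second, volatile-store use, so the call is not shrunk.
; The expected store alignment follows the target's alignment for double,
; which is why these functions use the DOUBLE-4BYTE-ALIGN and
; DOUBLE-8BYTE-ALIGN prefixes from the RUN lines instead of CHECK.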
define float @test_no_shrink_intrin_floor_multi_use_fpext(half %C) {
; DOUBLE-4BYTE-ALIGN-LABEL: @test_no_shrink_intrin_floor_multi_use_fpext(
; DOUBLE-4BYTE-ALIGN-NEXT:    [[D:%.*]] = fpext half [[C:%.*]] to double
; DOUBLE-4BYTE-ALIGN-NEXT:    store volatile double [[D]], ptr undef, align 4
; DOUBLE-4BYTE-ALIGN-NEXT:    [[E:%.*]] = call double @llvm.floor.f64(double [[D]])
; DOUBLE-4BYTE-ALIGN-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; DOUBLE-4BYTE-ALIGN-NEXT:    ret float [[F]]
;
; DOUBLE-8BYTE-ALIGN-LABEL: @test_no_shrink_intrin_floor_multi_use_fpext(
; DOUBLE-8BYTE-ALIGN-NEXT:    [[D:%.*]] = fpext half [[C:%.*]] to double
; DOUBLE-8BYTE-ALIGN-NEXT:    store volatile double [[D]], ptr undef, align 8
; DOUBLE-8BYTE-ALIGN-NEXT:    [[E:%.*]] = call double @llvm.floor.f64(double [[D]])
; DOUBLE-8BYTE-ALIGN-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; DOUBLE-8BYTE-ALIGN-NEXT:    ret float [[F]]
  %D = fpext half %C to double
  store volatile double %D, ptr undef
  %E = call double @llvm.floor.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
define float @test_no_shrink_intrin_fabs_multi_use_fpext(half %C) {
; DOUBLE-4BYTE-ALIGN-LABEL: @test_no_shrink_intrin_fabs_multi_use_fpext(
; DOUBLE-4BYTE-ALIGN-NEXT:    [[D:%.*]] = fpext half [[C:%.*]] to double
; DOUBLE-4BYTE-ALIGN-NEXT:    store volatile double [[D]], ptr undef, align 4
; DOUBLE-4BYTE-ALIGN-NEXT:    [[E:%.*]] = call double @llvm.fabs.f64(double [[D]])
; DOUBLE-4BYTE-ALIGN-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; DOUBLE-4BYTE-ALIGN-NEXT:    ret float [[F]]
;
; DOUBLE-8BYTE-ALIGN-LABEL: @test_no_shrink_intrin_fabs_multi_use_fpext(
; DOUBLE-8BYTE-ALIGN-NEXT:    [[D:%.*]] = fpext half [[C:%.*]] to double
; DOUBLE-8BYTE-ALIGN-NEXT:    store volatile double [[D]], ptr undef, align 8
; DOUBLE-8BYTE-ALIGN-NEXT:    [[E:%.*]] = call double @llvm.fabs.f64(double [[D]])
; DOUBLE-8BYTE-ALIGN-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; DOUBLE-8BYTE-ALIGN-NEXT:    ret float [[F]]
  %D = fpext half %C to double
  store volatile double %D, ptr undef
  %E = call double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
; DBG-VALID: CheckModuleDebugify: PASS