; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S -mtriple "i386-pc-linux" | FileCheck %s
; RUN: opt < %s -instcombine -S -mtriple "i386-pc-win32" | FileCheck %s
; RUN: opt < %s -instcombine -S -mtriple "x86_64-pc-win32" | FileCheck %s
; RUN: opt < %s -instcombine -S -mtriple "i386-pc-mingw32" | FileCheck %s
; RUN: opt < %s -instcombine -S -mtriple "x86_64-pc-mingw32" | FileCheck %s
; RUN: opt < %s -instcombine -S -mtriple "sparc-sun-solaris" | FileCheck %s
; RUN: opt < %s -instcombine -S -mtriple "x86_64-pc-win32" -enable-debugify 2>&1 | FileCheck --check-prefix=DBG-VALID %s

declare double @floor(double)
declare double @ceil(double)
declare double @round(double)
declare double @roundeven(double)
declare double @nearbyint(double)
declare double @trunc(double)
declare double @fabs(double)

declare double @llvm.ceil.f64(double)
declare <2 x double> @llvm.ceil.v2f64(<2 x double>)

declare double @llvm.fabs.f64(double)
declare <2 x double> @llvm.fabs.v2f64(<2 x double>)

declare double @llvm.floor.f64(double)
declare <2 x double> @llvm.floor.v2f64(<2 x double>)

declare double @llvm.nearbyint.f64(double)
declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>)

declare float @llvm.rint.f32(float)
declare <2 x float> @llvm.rint.v2f32(<2 x float>)

declare double @llvm.round.f64(double)
declare <2 x double> @llvm.round.v2f64(<2 x double>)

declare double @llvm.roundeven.f64(double)
declare <2 x double> @llvm.roundeven.v2f64(<2 x double>)

declare double @llvm.trunc.f64(double)
declare <2 x double> @llvm.trunc.v2f64(<2 x double>)
42 define float @test_shrink_libcall_floor(float %C) {
43 ; CHECK-LABEL: @test_shrink_libcall_floor(
44 ; CHECK-NEXT: [[F:%.*]] = call float @llvm.floor.f32(float [[C:%.*]])
45 ; CHECK-NEXT: ret float [[F]]
47 %D = fpext float %C to double
49 %E = call double @floor(double %D)
50 %F = fptrunc double %E to float
54 define float @test_shrink_libcall_ceil(float %C) {
55 ; CHECK-LABEL: @test_shrink_libcall_ceil(
56 ; CHECK-NEXT: [[F:%.*]] = call float @llvm.ceil.f32(float [[C:%.*]])
57 ; CHECK-NEXT: ret float [[F]]
59 %D = fpext float %C to double
61 %E = call double @ceil(double %D)
62 %F = fptrunc double %E to float
66 define float @test_shrink_libcall_round(float %C) {
67 ; CHECK-LABEL: @test_shrink_libcall_round(
68 ; CHECK-NEXT: [[F:%.*]] = call float @llvm.round.f32(float [[C:%.*]])
69 ; CHECK-NEXT: ret float [[F]]
71 %D = fpext float %C to double
73 %E = call double @round(double %D)
74 %F = fptrunc double %E to float
78 define float @test_shrink_libcall_roundeven(float %C) {
79 ; CHECK-LABEL: @test_shrink_libcall_roundeven(
80 ; CHECK-NEXT: [[F:%.*]] = call float @llvm.roundeven.f32(float [[C:%.*]])
81 ; CHECK-NEXT: ret float [[F]]
83 %D = fpext float %C to double
85 %E = call double @roundeven(double %D)
86 %F = fptrunc double %E to float
90 define float @test_shrink_libcall_nearbyint(float %C) {
91 ; CHECK-LABEL: @test_shrink_libcall_nearbyint(
92 ; CHECK-NEXT: [[F:%.*]] = call float @llvm.nearbyint.f32(float [[C:%.*]])
93 ; CHECK-NEXT: ret float [[F]]
95 %D = fpext float %C to double
97 %E = call double @nearbyint(double %D)
98 %F = fptrunc double %E to float
102 define float @test_shrink_libcall_trunc(float %C) {
103 ; CHECK-LABEL: @test_shrink_libcall_trunc(
104 ; CHECK-NEXT: [[F:%.*]] = call float @llvm.trunc.f32(float [[C:%.*]])
105 ; CHECK-NEXT: ret float [[F]]
107 %D = fpext float %C to double
109 %E = call double @trunc(double %D)
110 %F = fptrunc double %E to float
114 ; This is replaced with the intrinsic, which does the right thing on
116 define float @test_shrink_libcall_fabs(float %C) {
117 ; CHECK-LABEL: @test_shrink_libcall_fabs(
118 ; CHECK-NEXT: [[F:%.*]] = call float @llvm.fabs.f32(float [[C:%.*]])
119 ; CHECK-NEXT: ret float [[F]]
121 %D = fpext float %C to double
122 %E = call double @fabs(double %D)
123 %F = fptrunc double %E to float
127 ; Make sure fast math flags are preserved
128 define float @test_shrink_libcall_fabs_fast(float %C) {
129 ; CHECK-LABEL: @test_shrink_libcall_fabs_fast(
130 ; CHECK-NEXT: [[F:%.*]] = call fast float @llvm.fabs.f32(float [[C:%.*]])
131 ; CHECK-NEXT: ret float [[F]]
133 %D = fpext float %C to double
134 %E = call fast double @fabs(double %D)
135 %F = fptrunc double %E to float
139 define float @test_shrink_intrin_ceil(float %C) {
140 ; CHECK-LABEL: @test_shrink_intrin_ceil(
141 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.ceil.f32(float [[C:%.*]])
142 ; CHECK-NEXT: ret float [[TMP1]]
144 %D = fpext float %C to double
145 %E = call double @llvm.ceil.f64(double %D)
146 %F = fptrunc double %E to float
150 define float @test_shrink_intrin_fabs(float %C) {
151 ; CHECK-LABEL: @test_shrink_intrin_fabs(
152 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[C:%.*]])
153 ; CHECK-NEXT: ret float [[TMP1]]
155 %D = fpext float %C to double
156 %E = call double @llvm.fabs.f64(double %D)
157 %F = fptrunc double %E to float
161 define float @test_shrink_intrin_floor(float %C) {
162 ; CHECK-LABEL: @test_shrink_intrin_floor(
163 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.floor.f32(float [[C:%.*]])
164 ; CHECK-NEXT: ret float [[TMP1]]
166 %D = fpext float %C to double
167 %E = call double @llvm.floor.f64(double %D)
168 %F = fptrunc double %E to float
172 define float @test_shrink_intrin_nearbyint(float %C) {
173 ; CHECK-LABEL: @test_shrink_intrin_nearbyint(
174 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.nearbyint.f32(float [[C:%.*]])
175 ; CHECK-NEXT: ret float [[TMP1]]
177 %D = fpext float %C to double
178 %E = call double @llvm.nearbyint.f64(double %D)
179 %F = fptrunc double %E to float
183 define half @test_shrink_intrin_rint(half %C) {
184 ; CHECK-LABEL: @test_shrink_intrin_rint(
185 ; CHECK-NEXT: [[TMP1:%.*]] = call half @llvm.rint.f16(half [[C:%.*]])
186 ; CHECK-NEXT: ret half [[TMP1]]
188 %D = fpext half %C to float
189 %E = call float @llvm.rint.f32(float %D)
190 %F = fptrunc float %E to half
194 define float @test_shrink_intrin_round(float %C) {
195 ; CHECK-LABEL: @test_shrink_intrin_round(
196 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.round.f32(float [[C:%.*]])
197 ; CHECK-NEXT: ret float [[TMP1]]
199 %D = fpext float %C to double
200 %E = call double @llvm.round.f64(double %D)
201 %F = fptrunc double %E to float
205 define float @test_shrink_intrin_roundeven(float %C) {
206 ; CHECK-LABEL: @test_shrink_intrin_roundeven(
207 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.roundeven.f32(float [[C:%.*]])
208 ; CHECK-NEXT: ret float [[TMP1]]
210 %D = fpext float %C to double
211 %E = call double @llvm.roundeven.f64(double %D)
212 %F = fptrunc double %E to float
216 define float @test_shrink_intrin_trunc(float %C) {
217 ; CHECK-LABEL: @test_shrink_intrin_trunc(
218 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.trunc.f32(float [[C:%.*]])
219 ; CHECK-NEXT: ret float [[TMP1]]
221 %D = fpext float %C to double
222 %E = call double @llvm.trunc.f64(double %D)
223 %F = fptrunc double %E to float
declare void @use_v2f64(<2 x double>)
declare void @use_v2f32(<2 x float>)
230 define <2 x float> @test_shrink_intrin_ceil_multi_use(<2 x float> %C) {
231 ; CHECK-LABEL: @test_shrink_intrin_ceil_multi_use(
232 ; CHECK-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
233 ; CHECK-NEXT: [[E:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[D]])
234 ; CHECK-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
235 ; CHECK-NEXT: call void @use_v2f64(<2 x double> [[D]])
236 ; CHECK-NEXT: ret <2 x float> [[F]]
238 %D = fpext <2 x float> %C to <2 x double>
239 %E = call <2 x double> @llvm.ceil.v2f64(<2 x double> %D)
240 %F = fptrunc <2 x double> %E to <2 x float>
241 call void @use_v2f64(<2 x double> %D)
245 define <2 x float> @test_shrink_intrin_fabs_multi_use(<2 x float> %C) {
246 ; CHECK-LABEL: @test_shrink_intrin_fabs_multi_use(
247 ; CHECK-NEXT: [[TMP1:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[C:%.*]])
248 ; CHECK-NEXT: [[E:%.*]] = fpext <2 x float> [[TMP1]] to <2 x double>
249 ; CHECK-NEXT: call void @use_v2f64(<2 x double> [[E]])
250 ; CHECK-NEXT: ret <2 x float> [[TMP1]]
252 %D = fpext <2 x float> %C to <2 x double>
253 %E = call <2 x double> @llvm.fabs.v2f64(<2 x double> %D)
254 %F = fptrunc <2 x double> %E to <2 x float>
255 call void @use_v2f64(<2 x double> %E)
259 define <2 x float> @test_shrink_intrin_floor_multi_use(<2 x float> %C) {
260 ; CHECK-LABEL: @test_shrink_intrin_floor_multi_use(
261 ; CHECK-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
262 ; CHECK-NEXT: [[E:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[D]])
263 ; CHECK-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
264 ; CHECK-NEXT: call void @use_v2f64(<2 x double> [[D]])
265 ; CHECK-NEXT: call void @use_v2f64(<2 x double> [[E]])
266 ; CHECK-NEXT: ret <2 x float> [[F]]
268 %D = fpext <2 x float> %C to <2 x double>
269 %E = call <2 x double> @llvm.floor.v2f64(<2 x double> %D)
270 %F = fptrunc <2 x double> %E to <2 x float>
271 call void @use_v2f64(<2 x double> %D)
272 call void @use_v2f64(<2 x double> %E)
276 define <2 x float> @test_shrink_intrin_nearbyint_multi_use(<2 x float> %C) {
277 ; CHECK-LABEL: @test_shrink_intrin_nearbyint_multi_use(
278 ; CHECK-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
279 ; CHECK-NEXT: [[E:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[D]])
280 ; CHECK-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
281 ; CHECK-NEXT: call void @use_v2f64(<2 x double> [[D]])
282 ; CHECK-NEXT: ret <2 x float> [[F]]
284 %D = fpext <2 x float> %C to <2 x double>
285 %E = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %D)
286 %F = fptrunc <2 x double> %E to <2 x float>
287 call void @use_v2f64(<2 x double> %D)
291 define <2 x half> @test_shrink_intrin_rint_multi_use(<2 x half> %C) {
292 ; CHECK-LABEL: @test_shrink_intrin_rint_multi_use(
293 ; CHECK-NEXT: [[TMP1:%.*]] = call <2 x half> @llvm.rint.v2f16(<2 x half> [[C:%.*]])
294 ; CHECK-NEXT: [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x float>
295 ; CHECK-NEXT: call void @use_v2f32(<2 x float> [[E]])
296 ; CHECK-NEXT: ret <2 x half> [[TMP1]]
298 %D = fpext <2 x half> %C to <2 x float>
299 %E = call <2 x float> @llvm.rint.v2f32(<2 x float> %D)
300 %F = fptrunc <2 x float> %E to <2 x half>
301 call void @use_v2f32(<2 x float> %E)
305 define <2 x float> @test_shrink_intrin_round_multi_use(<2 x float> %C) {
306 ; CHECK-LABEL: @test_shrink_intrin_round_multi_use(
307 ; CHECK-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
308 ; CHECK-NEXT: [[E:%.*]] = call <2 x double> @llvm.round.v2f64(<2 x double> [[D]])
309 ; CHECK-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
310 ; CHECK-NEXT: call void @use_v2f64(<2 x double> [[D]])
311 ; CHECK-NEXT: call void @use_v2f64(<2 x double> [[E]])
312 ; CHECK-NEXT: ret <2 x float> [[F]]
314 %D = fpext <2 x float> %C to <2 x double>
315 %E = call <2 x double> @llvm.round.v2f64(<2 x double> %D)
316 %F = fptrunc <2 x double> %E to <2 x float>
317 call void @use_v2f64(<2 x double> %D)
318 call void @use_v2f64(<2 x double> %E)
322 define <2 x float> @test_shrink_intrin_roundeven_multi_use(<2 x float> %C) {
323 ; CHECK-LABEL: @test_shrink_intrin_roundeven_multi_use(
324 ; CHECK-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
325 ; CHECK-NEXT: [[E:%.*]] = call <2 x double> @llvm.roundeven.v2f64(<2 x double> [[D]])
326 ; CHECK-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
327 ; CHECK-NEXT: call void @use_v2f64(<2 x double> [[D]])
328 ; CHECK-NEXT: call void @use_v2f64(<2 x double> [[E]])
329 ; CHECK-NEXT: ret <2 x float> [[F]]
331 %D = fpext <2 x float> %C to <2 x double>
332 %E = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %D)
333 %F = fptrunc <2 x double> %E to <2 x float>
334 call void @use_v2f64(<2 x double> %D)
335 call void @use_v2f64(<2 x double> %E)
339 define <2 x float> @test_shrink_intrin_trunc_multi_use(<2 x float> %C) {
340 ; CHECK-LABEL: @test_shrink_intrin_trunc_multi_use(
341 ; CHECK-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
342 ; CHECK-NEXT: [[E:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[D]])
343 ; CHECK-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
344 ; CHECK-NEXT: call void @use_v2f64(<2 x double> [[D]])
345 ; CHECK-NEXT: ret <2 x float> [[F]]
347 %D = fpext <2 x float> %C to <2 x double>
348 %E = call <2 x double> @llvm.trunc.v2f64(<2 x double> %D)
349 %F = fptrunc <2 x double> %E to <2 x float>
350 call void @use_v2f64(<2 x double> %D)
354 ; Make sure fast math flags are preserved
355 define float @test_shrink_intrin_fabs_fast(float %C) {
356 ; CHECK-LABEL: @test_shrink_intrin_fabs_fast(
357 ; CHECK-NEXT: [[TMP1:%.*]] = call fast float @llvm.fabs.f32(float [[C:%.*]])
358 ; CHECK-NEXT: ret float [[TMP1]]
360 %D = fpext float %C to double
361 %E = call fast double @llvm.fabs.f64(double %D)
362 %F = fptrunc double %E to float
366 define float @test_no_shrink_intrin_floor(double %D) {
367 ; CHECK-LABEL: @test_no_shrink_intrin_floor(
368 ; CHECK-NEXT: [[E:%.*]] = call double @llvm.floor.f64(double [[D:%.*]])
369 ; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
370 ; CHECK-NEXT: ret float [[F]]
372 %E = call double @llvm.floor.f64(double %D)
373 %F = fptrunc double %E to float
377 define float @test_no_shrink_intrin_ceil(double %D) {
378 ; CHECK-LABEL: @test_no_shrink_intrin_ceil(
379 ; CHECK-NEXT: [[E:%.*]] = call double @llvm.ceil.f64(double [[D:%.*]])
380 ; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
381 ; CHECK-NEXT: ret float [[F]]
383 %E = call double @llvm.ceil.f64(double %D)
384 %F = fptrunc double %E to float
388 define float @test_no_shrink_intrin_round(double %D) {
389 ; CHECK-LABEL: @test_no_shrink_intrin_round(
390 ; CHECK-NEXT: [[E:%.*]] = call double @llvm.round.f64(double [[D:%.*]])
391 ; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
392 ; CHECK-NEXT: ret float [[F]]
394 %E = call double @llvm.round.f64(double %D)
395 %F = fptrunc double %E to float
399 define float @test_no_shrink_intrin_roundeven(double %D) {
400 ; CHECK-LABEL: @test_no_shrink_intrin_roundeven(
401 ; CHECK-NEXT: [[E:%.*]] = call double @llvm.roundeven.f64(double [[D:%.*]])
402 ; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
403 ; CHECK-NEXT: ret float [[F]]
405 %E = call double @llvm.roundeven.f64(double %D)
406 %F = fptrunc double %E to float
410 define float @test_no_shrink_intrin_nearbyint(double %D) {
411 ; CHECK-LABEL: @test_no_shrink_intrin_nearbyint(
412 ; CHECK-NEXT: [[E:%.*]] = call double @llvm.nearbyint.f64(double [[D:%.*]])
413 ; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
414 ; CHECK-NEXT: ret float [[F]]
416 %E = call double @llvm.nearbyint.f64(double %D)
417 %F = fptrunc double %E to float
421 define float @test_no_shrink_intrin_trunc(double %D) {
422 ; CHECK-LABEL: @test_no_shrink_intrin_trunc(
423 ; CHECK-NEXT: [[E:%.*]] = call double @llvm.trunc.f64(double [[D:%.*]])
424 ; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
425 ; CHECK-NEXT: ret float [[F]]
427 %E = call double @llvm.trunc.f64(double %D)
428 %F = fptrunc double %E to float
432 define float @test_shrink_intrin_fabs_double_src(double %D) {
433 ; CHECK-LABEL: @test_shrink_intrin_fabs_double_src(
434 ; CHECK-NEXT: [[TMP1:%.*]] = fptrunc double [[D:%.*]] to float
435 ; CHECK-NEXT: [[F:%.*]] = call float @llvm.fabs.f32(float [[TMP1]])
436 ; CHECK-NEXT: ret float [[F]]
438 %E = call double @llvm.fabs.f64(double %D)
439 %F = fptrunc double %E to float
443 ; Make sure fast math flags are preserved
444 define float @test_shrink_intrin_fabs_fast_double_src(double %D) {
445 ; CHECK-LABEL: @test_shrink_intrin_fabs_fast_double_src(
446 ; CHECK-NEXT: [[TMP1:%.*]] = fptrunc double [[D:%.*]] to float
447 ; CHECK-NEXT: [[F:%.*]] = call fast float @llvm.fabs.f32(float [[TMP1]])
448 ; CHECK-NEXT: ret float [[F]]
450 %E = call fast double @llvm.fabs.f64(double %D)
451 %F = fptrunc double %E to float
455 define float @test_shrink_float_convertible_constant_intrin_floor() {
456 ; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_floor(
457 ; CHECK-NEXT: ret float 2.000000e+00
459 %E = call double @llvm.floor.f64(double 2.1)
460 %F = fptrunc double %E to float
464 define float @test_shrink_float_convertible_constant_intrin_ceil() {
465 ; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_ceil(
466 ; CHECK-NEXT: ret float 3.000000e+00
468 %E = call double @llvm.ceil.f64(double 2.1)
469 %F = fptrunc double %E to float
473 define float @test_shrink_float_convertible_constant_intrin_round() {
474 ; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_round(
475 ; CHECK-NEXT: ret float 2.000000e+00
477 %E = call double @llvm.round.f64(double 2.1)
478 %F = fptrunc double %E to float
482 define float @test_shrink_float_convertible_constant_intrin_roundeven() {
483 ; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_roundeven(
484 ; CHECK-NEXT: ret float 2.000000e+00
486 %E = call double @llvm.roundeven.f64(double 2.1)
487 %F = fptrunc double %E to float
491 define float @test_shrink_float_convertible_constant_intrin_nearbyint() {
492 ; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_nearbyint(
493 ; CHECK-NEXT: ret float 2.000000e+00
495 %E = call double @llvm.nearbyint.f64(double 2.1)
496 %F = fptrunc double %E to float
500 define float @test_shrink_float_convertible_constant_intrin_trunc() {
501 ; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_trunc(
502 ; CHECK-NEXT: ret float 2.000000e+00
504 %E = call double @llvm.trunc.f64(double 2.1)
505 %F = fptrunc double %E to float
509 define float @test_shrink_float_convertible_constant_intrin_fabs() {
510 ; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_fabs(
511 ; CHECK-NEXT: ret float 0x4000CCCCC0000000
513 %E = call double @llvm.fabs.f64(double 2.1)
514 %F = fptrunc double %E to float
518 ; Make sure fast math flags are preserved
519 define float @test_shrink_float_convertible_constant_intrin_fabs_fast() {
520 ; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_fabs_fast(
521 ; CHECK-NEXT: ret float 0x4000CCCCC0000000
523 %E = call fast double @llvm.fabs.f64(double 2.1)
524 %F = fptrunc double %E to float
528 define half @test_no_shrink_mismatched_type_intrin_floor(double %D) {
529 ; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_floor(
530 ; CHECK-NEXT: [[E:%.*]] = call double @llvm.floor.f64(double [[D:%.*]])
531 ; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
532 ; CHECK-NEXT: ret half [[F]]
534 %E = call double @llvm.floor.f64(double %D)
535 %F = fptrunc double %E to half
539 define half @test_no_shrink_mismatched_type_intrin_ceil(double %D) {
540 ; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_ceil(
541 ; CHECK-NEXT: [[E:%.*]] = call double @llvm.ceil.f64(double [[D:%.*]])
542 ; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
543 ; CHECK-NEXT: ret half [[F]]
545 %E = call double @llvm.ceil.f64(double %D)
546 %F = fptrunc double %E to half
550 define half @test_no_shrink_mismatched_type_intrin_round(double %D) {
551 ; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_round(
552 ; CHECK-NEXT: [[E:%.*]] = call double @llvm.round.f64(double [[D:%.*]])
553 ; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
554 ; CHECK-NEXT: ret half [[F]]
556 %E = call double @llvm.round.f64(double %D)
557 %F = fptrunc double %E to half
561 define half @test_no_shrink_mismatched_type_intrin_roundeven(double %D) {
562 ; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_roundeven(
563 ; CHECK-NEXT: [[E:%.*]] = call double @llvm.roundeven.f64(double [[D:%.*]])
564 ; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
565 ; CHECK-NEXT: ret half [[F]]
567 %E = call double @llvm.roundeven.f64(double %D)
568 %F = fptrunc double %E to half
572 define half @test_no_shrink_mismatched_type_intrin_nearbyint(double %D) {
573 ; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_nearbyint(
574 ; CHECK-NEXT: [[E:%.*]] = call double @llvm.nearbyint.f64(double [[D:%.*]])
575 ; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
576 ; CHECK-NEXT: ret half [[F]]
578 %E = call double @llvm.nearbyint.f64(double %D)
579 %F = fptrunc double %E to half
583 define half @test_no_shrink_mismatched_type_intrin_trunc(double %D) {
584 ; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_trunc(
585 ; CHECK-NEXT: [[E:%.*]] = call double @llvm.trunc.f64(double [[D:%.*]])
586 ; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
587 ; CHECK-NEXT: ret half [[F]]
589 %E = call double @llvm.trunc.f64(double %D)
590 %F = fptrunc double %E to half
594 define half @test_shrink_mismatched_type_intrin_fabs_double_src(double %D) {
595 ; CHECK-LABEL: @test_shrink_mismatched_type_intrin_fabs_double_src(
596 ; CHECK-NEXT: [[TMP1:%.*]] = fptrunc double [[D:%.*]] to half
597 ; CHECK-NEXT: [[F:%.*]] = call half @llvm.fabs.f16(half [[TMP1]])
598 ; CHECK-NEXT: ret half [[F]]
600 %E = call double @llvm.fabs.f64(double %D)
601 %F = fptrunc double %E to half
605 ; Make sure fast math flags are preserved
606 define half @test_mismatched_type_intrin_fabs_fast_double_src(double %D) {
607 ; CHECK-LABEL: @test_mismatched_type_intrin_fabs_fast_double_src(
608 ; CHECK-NEXT: [[TMP1:%.*]] = fptrunc double [[D:%.*]] to half
609 ; CHECK-NEXT: [[F:%.*]] = call fast half @llvm.fabs.f16(half [[TMP1]])
610 ; CHECK-NEXT: ret half [[F]]
612 %E = call fast double @llvm.fabs.f64(double %D)
613 %F = fptrunc double %E to half
617 define <2 x double> @test_shrink_intrin_floor_fp16_vec(<2 x half> %C) {
618 ; CHECK-LABEL: @test_shrink_intrin_floor_fp16_vec(
619 ; CHECK-NEXT: [[TMP1:%.*]] = call arcp <2 x half> @llvm.floor.v2f16(<2 x half> [[C:%.*]])
620 ; CHECK-NEXT: [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
621 ; CHECK-NEXT: ret <2 x double> [[E]]
623 %D = fpext <2 x half> %C to <2 x double>
624 %E = call arcp <2 x double> @llvm.floor.v2f64(<2 x double> %D)
628 define float @test_shrink_intrin_ceil_fp16_src(half %C) {
629 ; CHECK-LABEL: @test_shrink_intrin_ceil_fp16_src(
630 ; CHECK-NEXT: [[TMP1:%.*]] = call half @llvm.ceil.f16(half [[C:%.*]])
631 ; CHECK-NEXT: [[F:%.*]] = fpext half [[TMP1]] to float
632 ; CHECK-NEXT: ret float [[F]]
634 %D = fpext half %C to double
635 %E = call double @llvm.ceil.f64(double %D)
636 %F = fptrunc double %E to float
640 define <2 x double> @test_shrink_intrin_round_fp16_vec(<2 x half> %C) {
641 ; CHECK-LABEL: @test_shrink_intrin_round_fp16_vec(
642 ; CHECK-NEXT: [[TMP1:%.*]] = call <2 x half> @llvm.round.v2f16(<2 x half> [[C:%.*]])
643 ; CHECK-NEXT: [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
644 ; CHECK-NEXT: ret <2 x double> [[E]]
646 %D = fpext <2 x half> %C to <2 x double>
647 %E = call <2 x double> @llvm.round.v2f64(<2 x double> %D)
651 define <2 x double> @test_shrink_intrin_roundeven_fp16_vec(<2 x half> %C) {
652 ; CHECK-LABEL: @test_shrink_intrin_roundeven_fp16_vec(
653 ; CHECK-NEXT: [[TMP1:%.*]] = call <2 x half> @llvm.roundeven.v2f16(<2 x half> [[C:%.*]])
654 ; CHECK-NEXT: [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
655 ; CHECK-NEXT: ret <2 x double> [[E]]
657 %D = fpext <2 x half> %C to <2 x double>
658 %E = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %D)
662 define float @test_shrink_intrin_nearbyint_fp16_src(half %C) {
663 ; CHECK-LABEL: @test_shrink_intrin_nearbyint_fp16_src(
664 ; CHECK-NEXT: [[TMP1:%.*]] = call half @llvm.nearbyint.f16(half [[C:%.*]])
665 ; CHECK-NEXT: [[F:%.*]] = fpext half [[TMP1]] to float
666 ; CHECK-NEXT: ret float [[F]]
668 %D = fpext half %C to double
669 %E = call double @llvm.nearbyint.f64(double %D)
670 %F = fptrunc double %E to float
674 define <2 x double> @test_shrink_intrin_trunc_fp16_src(<2 x half> %C) {
675 ; CHECK-LABEL: @test_shrink_intrin_trunc_fp16_src(
676 ; CHECK-NEXT: [[TMP1:%.*]] = call <2 x half> @llvm.trunc.v2f16(<2 x half> [[C:%.*]])
677 ; CHECK-NEXT: [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
678 ; CHECK-NEXT: ret <2 x double> [[E]]
680 %D = fpext <2 x half> %C to <2 x double>
681 %E = call <2 x double> @llvm.trunc.v2f64(<2 x double> %D)
685 define float @test_shrink_intrin_fabs_fp16_src(half %C) {
686 ; CHECK-LABEL: @test_shrink_intrin_fabs_fp16_src(
687 ; CHECK-NEXT: [[TMP1:%.*]] = call half @llvm.fabs.f16(half [[C:%.*]])
688 ; CHECK-NEXT: [[F:%.*]] = fpext half [[TMP1]] to float
689 ; CHECK-NEXT: ret float [[F]]
691 %D = fpext half %C to double
692 %E = call double @llvm.fabs.f64(double %D)
693 %F = fptrunc double %E to float
697 ; Make sure fast math flags are preserved
698 define float @test_shrink_intrin_fabs_fast_fp16_src(half %C) {
699 ; CHECK-LABEL: @test_shrink_intrin_fabs_fast_fp16_src(
700 ; CHECK-NEXT: [[TMP1:%.*]] = call fast half @llvm.fabs.f16(half [[C:%.*]])
701 ; CHECK-NEXT: [[F:%.*]] = fpext half [[TMP1]] to float
702 ; CHECK-NEXT: ret float [[F]]
704 %D = fpext half %C to double
705 %E = call fast double @llvm.fabs.f64(double %D)
706 %F = fptrunc double %E to float
710 define float @test_no_shrink_intrin_floor_multi_use_fpext(half %C) {
711 ; CHECK-LABEL: @test_no_shrink_intrin_floor_multi_use_fpext(
712 ; CHECK-NEXT: [[D:%.*]] = fpext half [[C:%.*]] to double
713 ; CHECK-NEXT: store volatile double [[D]], double* undef, align 8
714 ; CHECK-NEXT: [[E:%.*]] = call double @llvm.floor.f64(double [[D]])
715 ; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
716 ; CHECK-NEXT: ret float [[F]]
718 %D = fpext half %C to double
719 store volatile double %D, double* undef
720 %E = call double @llvm.floor.f64(double %D)
721 %F = fptrunc double %E to float
725 define float @test_no_shrink_intrin_fabs_multi_use_fpext(half %C) {
726 ; CHECK-LABEL: @test_no_shrink_intrin_fabs_multi_use_fpext(
727 ; CHECK-NEXT: [[D:%.*]] = fpext half [[C:%.*]] to double
728 ; CHECK-NEXT: store volatile double [[D]], double* undef, align 8
729 ; CHECK-NEXT: [[E:%.*]] = call double @llvm.fabs.f64(double [[D]])
730 ; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
731 ; CHECK-NEXT: ret float [[F]]
733 %D = fpext half %C to double
734 store volatile double %D, double* undef
735 %E = call double @llvm.fabs.f64(double %D)
736 %F = fptrunc double %E to float
; DBG-VALID: CheckModuleDebugify: PASS