; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck -check-prefixes=X64 %s
; XUN: llc -mtriple=i386-pc-win32 < %s | FileCheck -check-prefix=WIN32 %s
; FIXME: Expansion support without libcalls

; FIXME: Implement f16->f32 promotion for strictfp
; define half @test_strict_ldexp_f16_i32(ptr addrspace(1) %out, half %a, i32 %b) #2 {
;   %result = call half @llvm.experimental.constrained.ldexp.f16.i32(half %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
;   ret half %result
; }
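
; The scalar strictfp cases below are lowered to libcalls on X64: ldexpf for
; f32 and ldexp for f64, with the i32 exponent passed in %edi.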
define float @test_strict_ldexp_f32_i32(ptr addrspace(1) %out, float %a, i32 %b) #2 {
; X64-LABEL: test_strict_ldexp_f32_i32:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rax
; X64-NEXT:    .cfi_def_cfa_offset 16
; X64-NEXT:    movl %esi, %edi
; X64-NEXT:    callq ldexpf@PLT
; X64-NEXT:    popq %rax
; X64-NEXT:    .cfi_def_cfa_offset 8
; X64-NEXT:    retq
  %result = call float @llvm.experimental.constrained.ldexp.f32.i32(float %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %result
}

define double @test_strict_ldexp_f64_i32(ptr addrspace(1) %out, double %a, i32 %b) #2 {
; X64-LABEL: test_strict_ldexp_f64_i32:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rax
; X64-NEXT:    .cfi_def_cfa_offset 16
; X64-NEXT:    movl %esi, %edi
; X64-NEXT:    callq ldexp@PLT
; X64-NEXT:    popq %rax
; X64-NEXT:    .cfi_def_cfa_offset 8
; X64-NEXT:    retq
  %result = call double @llvm.experimental.constrained.ldexp.f64.i32(double %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %result
}
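
; There is no vector ldexp libcall, so the <2 x float> case is scalarized as
; the checks below show: each element gets its own ldexpf call and the two
; results are recombined with unpcklps.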
define <2 x float> @test_strict_ldexp_v2f32_v2i32(ptr addrspace(1) %out, <2 x float> %a, <2 x i32> %b) #2 {
; X64-LABEL: test_strict_ldexp_v2f32_v2i32:
; X64:       # %bb.0:
; X64-NEXT:    subq $56, %rsp
; X64-NEXT:    .cfi_def_cfa_offset 64
; X64-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; X64-NEXT:    movd %xmm1, %edi
; X64-NEXT:    callq ldexpf@PLT
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X64-NEXT:    pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; X64-NEXT:    # xmm1 = mem[1,1,1,1]
; X64-NEXT:    movd %xmm1, %edi
; X64-NEXT:    callq ldexpf@PLT
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; X64-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-NEXT:    movaps %xmm1, %xmm0
; X64-NEXT:    addq $56, %rsp
; X64-NEXT:    .cfi_def_cfa_offset 8
; X64-NEXT:    retq
  %result = call <2 x float> @llvm.experimental.constrained.ldexp.v2f32.v2i32(<2 x float> %a, <2 x i32> %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %result
}

declare half @llvm.experimental.constrained.ldexp.f16.i32(half, i32, metadata, metadata) #1
declare float @llvm.experimental.constrained.ldexp.f32.i32(float, i32, metadata, metadata) #1
declare double @llvm.experimental.constrained.ldexp.f64.i32(double, i32, metadata, metadata) #1
declare x86_fp80 @llvm.experimental.constrained.ldexp.f80.i32(x86_fp80, i32, metadata, metadata) #1
declare <2 x float> @llvm.experimental.constrained.ldexp.v2f32.v2i32(<2 x float>, <2 x i32>, metadata, metadata) #1

attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
attributes #1 = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) }
attributes #2 = { strictfp }